file_path (string, 20–207 chars) | content (string, 5–3.85M chars) | size (int64, 5–3.85M) | lang (9 classes) | avg_line_length (float64, 1.33–100) | max_line_length (int64, 4–993) | alphanum_fraction (float64, 0.26–0.93)
---|---|---|---|---|---|---
SoulVisionCreations/avataar_omniverse_ext/exts/incarnate.omniverse/docs/README.md | # Incarnate Avataar Extension
Bring 3D outputs from Avataar's Creator Platform and Avataar's Incarnate Scanning App into NVIDIA Omniverse.
## Description
This extension lets an Omniverse user directly import 3D mesh outputs from Avataar's Incarnate scanning app (available for free on Apple's App Store) and from Avataar's Creator Platform (at creator.avataar.ai).
Key features:
1. Ease of 3D model creation - Avataar's mobile scanning app makes 3D model creation easy: simply scan any object and, after a few hours of processing, get a mesh that can be imported into Omniverse.
2. Ease of 3D model import - download your 3D model from Avataar's Creator Platform (at creator.avataar.ai) as an .obj mesh file of the object you scanned with the Avataar mobile app, then import it into any Omniverse scene.
| 904 | Markdown | 89.499991 | 270 | 0.797566 |
SoulVisionCreations/avataar_omniverse_ext/exts/incarnate.omniverse/docs/index.rst | incarnate.omniverse
#############################
Example of Python only extension
.. toctree::
:maxdepth: 1
README
CHANGELOG
.. automodule:: incarnate.omniverse
:platform: Windows-x86_64, Linux-x86_64
:members:
:undoc-members:
:show-inheritance:
:imported-members:
:exclude-members: contextmanager
| 339 | reStructuredText | 15.190475 | 43 | 0.625369 |
BeanSamuel/Exchange-Rate-Prediction-RL/env.py | from copy import deepcopy
from time import time
from enum import Enum
import numpy as np
import matplotlib.pyplot as plt
import gymnasium as gym
import pandas as pd
import torch
from torch.distributions import Categorical
from utils.runner import Runner
class Actions(Enum):
Buy_NTD = 0
Buy_AUD = 1
Buy_CAD = 2
Buy_EUR = 3
Buy_GBP = 4
Buy_HKD = 5
Buy_JPY = 6
Buy_SGD = 7
Buy_USD = 8
class Positions(Enum):
    # the currency currently held
NTD = 0
AUD = 1
CAD = 2
EUR = 3
GBP = 4
HKD = 5
JPY = 6
SGD = 7
USD = 8
def opposite(self, action):
return Positions(action)
class TradingEnv(gym.Env):
metadata = {'render_modes': ['human'], 'render_fps': 3}
def __init__(self, df, window_size, render_mode=None):
assert df.ndim == 2
assert render_mode is None or render_mode in self.metadata['render_modes']
self.render_mode = render_mode
self.df = df
self.window_size = window_size
self.prices, self.signal_features = self._process_data()
self.shape = (window_size, self.signal_features.shape[1])
# spaces
self.action_space = gym.spaces.Discrete(len(Actions))
INF = 1e10
self.observation_space = gym.spaces.Box(
low=-INF, high=INF, shape=self.shape, dtype=np.float32,
)
# episode
self._start_tick = self.window_size
self._end_tick = len(self.prices) - 1
self._truncated = None
self._current_tick = None
self._last_trade_tick = None
self._position = None
self._position_history = None
self._last_position = None
self._action = None
self._total_reward = None
self._total_profit = None
self._first_rendering = None
self.history = None
def reset(self, seed=None, options=None):
super().reset(seed=seed, options=options)
self.action_space.seed(int((self.np_random.uniform(0, seed if seed is not None else 1))))
self._truncated = False
self._current_tick = self._start_tick
self._last_trade_tick = self._current_tick - 1
self._position = Positions.NTD
self._position_history = (self.window_size * [None]) + [self._position]
self._action = 0
self._total_reward = 0.
self._total_profit = 1. # unit
self._first_rendering = True
self.history = {}
observation = self._get_observation()
info = self._get_info()
if self.render_mode == 'human':
self._render_frame()
return observation, info
def step(self, action):
# print(action)
self._action = action
self._truncated = False
self._current_tick += 1
if self._current_tick == self._end_tick:
self._truncated = True
step_reward = self._calculate_reward(action)
self._total_reward += step_reward
self._update_profit(action)
trade = False
if action != self._position.value:
trade = True
if trade:
self._last_position = self._position
self._position = self._position.opposite(action)
self._last_trade_tick = self._current_tick
self._position_history.append(self._position)
observation = self._get_observation()
info = self._get_info()
self._update_history(info)
if self.render_mode == 'human':
self._render_frame()
return observation, step_reward, self._truncated, info
def _get_info(self):
return dict(
total_reward=self._total_reward,
total_profit=self._total_profit,
position=self._position
)
def _get_observation(self):
return self.signal_features[self._current_tick - self.window_size:self._current_tick]
def _update_history(self, info):
if not self.history:
self.history = {key: [] for key in info.keys()}
for key, value in info.items():
self.history[key].append(value)
def _render_frame(self):
self.render()
def choice_price_col(self, position, buy_or_sell="買入"):
foreign_price = None
if position == Positions.AUD:
foreign_price = self.prices[f'AUD即期{buy_or_sell}'].to_numpy()
elif position == Positions.CAD:
foreign_price = self.prices[f'CAD即期{buy_or_sell}'].to_numpy()
elif position == Positions.EUR:
foreign_price = self.prices[f'EUR即期{buy_or_sell}'].to_numpy()
elif position == Positions.GBP:
foreign_price = self.prices[f'GBP即期{buy_or_sell}'].to_numpy()
elif position == Positions.HKD:
foreign_price = self.prices[f'HKD即期{buy_or_sell}'].to_numpy()
elif position == Positions.JPY:
foreign_price = self.prices[f'JPY即期{buy_or_sell}'].to_numpy()
elif position == Positions.SGD:
foreign_price = self.prices[f'SGD即期{buy_or_sell}'].to_numpy()
elif position == Positions.USD:
foreign_price = self.prices[f'USD即期{buy_or_sell}'].to_numpy()
return foreign_price
def render(self, mode='human'):
def _plot_position():
            # a trade occurred
if self._action != self._position.value:
                # currently not holding NTD (i.e. a foreign currency was bought)
if self._position != Positions.NTD:
                    # mark buys in red
buy_price_col = self.choice_price_col(self._position)
plt.scatter(self._current_tick, buy_price_col[self._current_tick], color='red')
                # previously not holding NTD (i.e. a foreign currency was sold)
if self._last_position != Positions.NTD:
                    # mark sells in green
sell_price_col = self.choice_price_col(self._last_position)
plt.scatter(self._current_tick, sell_price_col[self._current_tick], color='green')
start_time = time()
if self._first_rendering:
self._first_rendering = False
plt.cla()
plt.plot(self.prices['AUD即期買入'].to_numpy(), label="AUD")
plt.plot(self.prices['CAD即期買入'].to_numpy(), label="CAD")
plt.plot(self.prices['EUR即期買入'].to_numpy(), label="EUR")
plt.plot(self.prices['GBP即期買入'].to_numpy(), label="GBP")
plt.plot(self.prices['HKD即期買入'].to_numpy(), label="HKD")
plt.plot(self.prices['JPY即期買入'].to_numpy(), label="JPY")
plt.plot(self.prices['SGD即期買入'].to_numpy(), label="SGD")
plt.plot(self.prices['USD即期買入'].to_numpy(), label="USD")
# plt.yscale('log')
plt.legend(bbox_to_anchor=(1.0, 1.0))
            # mark the starting point in blue
plt.scatter(self._current_tick, self.prices['AUD即期買入'].to_numpy()[self._current_tick], color='blue')
plt.scatter(self._current_tick, self.prices['CAD即期買入'].to_numpy()[self._current_tick], color='blue')
plt.scatter(self._current_tick, self.prices['EUR即期買入'].to_numpy()[self._current_tick], color='blue')
plt.scatter(self._current_tick, self.prices['GBP即期買入'].to_numpy()[self._current_tick], color='blue')
plt.scatter(self._current_tick, self.prices['HKD即期買入'].to_numpy()[self._current_tick], color='blue')
plt.scatter(self._current_tick, self.prices['JPY即期買入'].to_numpy()[self._current_tick], color='blue')
plt.scatter(self._current_tick, self.prices['SGD即期買入'].to_numpy()[self._current_tick], color='blue')
plt.scatter(self._current_tick, self.prices['USD即期買入'].to_numpy()[self._current_tick], color='blue')
_plot_position()
plt.suptitle(
"Total Reward: %.6f" % self._total_reward + ' ~ ' +
"Total Profit: %.6f" % self._total_profit
)
end_time = time()
process_time = end_time - start_time
pause_time = (1 / self.metadata['render_fps']) - process_time
assert pause_time > 0., "High FPS! Try to reduce the 'render_fps' value."
plt.pause(pause_time)
def render_all(self, title=None):
plt.cla()
plt.plot(self.prices['AUD即期買入'].to_numpy(), label="AUD")
plt.plot(self.prices['CAD即期買入'].to_numpy(), label="CAD")
plt.plot(self.prices['EUR即期買入'].to_numpy(), label="EUR")
plt.plot(self.prices['GBP即期買入'].to_numpy(), label="GBP")
plt.plot(self.prices['HKD即期買入'].to_numpy(), label="HKD")
plt.plot(self.prices['JPY即期買入'].to_numpy(), label="JPY")
plt.plot(self.prices['SGD即期買入'].to_numpy(), label="SGD")
plt.plot(self.prices['USD即期買入'].to_numpy(), label="USD")
plt.legend(bbox_to_anchor=(1.0, 1.0))
last_positions = Positions.NTD
for i, position in enumerate(self._position_history):
if position != None:
                # a trade occurred
if position != last_positions:
                    # currently not holding NTD (i.e. a foreign currency was bought)
if position != Positions.NTD:
price_col = self.choice_price_col(position)
plt.scatter(i, price_col[i], color='red')
                    # previously not holding NTD (i.e. a foreign currency was sold)
if last_positions != Positions.NTD:
price_col = self.choice_price_col(last_positions)
plt.scatter(i, price_col[i], color='green')
last_positions = self._position_history[i]
if title:
plt.title(title)
plt.suptitle(
"Total Reward: %.6f" % self._total_reward + ' ~ ' +
"Total Profit: %.6f" % self._total_profit
)
def close(self):
plt.close()
def save_rendering(self, filepath):
plt.savefig(filepath)
def pause_rendering(self):
plt.show()
def _process_data(self):
raise NotImplementedError
def _calculate_reward(self, action):
raise NotImplementedError
def _update_profit(self, action):
raise NotImplementedError
class ForexEnv(TradingEnv):
def __init__(self, cfg):
self.config = cfg
self.cfg = cfg = cfg['task']['env']
self.train_df = pd.read_csv(cfg['train_data'])
self.train_df.replace("-", 0, inplace=True)
self.test_df = pd.read_csv(cfg['test_data'])
self.test_df.replace("-", 0, inplace=True)
self.frame_bound = cfg['frame_bound']
self.num_envs = cfg['num_envs']
self.window_size = cfg['window_size']
super().__init__(self.train_df, self.window_size, None)
self.num_obs = int(np.prod(self.observation_space.shape)) + 9
self.num_actions = int(np.prod(self.action_space.shape))
self.num_values = 1
self.obs = torch.zeros([self.num_envs, self.num_obs], dtype=torch.float)
self.reset()
def reset_done(self):
if self._truncated:
Runner.logger.log({'total profit': self._total_profit})
self.obs, _ = self.reset()
self.compute_obs()
return self.obs
def compute_obs(self):
ct_obs = [0] * 9
ct_obs[self._position.value] = 1
self.obs = torch.tensor(self.obs)
obs = list(self.obs.flatten()) + ct_obs
self.obs = torch.tensor(obs, dtype=torch.float).reshape(1, self.num_obs)
def step(self, action):
self.obs, rew, reset, _ = super().step(action[0].item())
Runner.logger.log({'reward': rew})
self.compute_obs()
rew = torch.tensor(rew, dtype=torch.float).reshape(1, 1)
reset = torch.tensor(reset, dtype=torch.long).reshape(1, 1)
return self.obs, rew, reset, {}
def _update_profit(self, action):
        # a trade occurred
if action != self._position.value:
            # previously held currency was not NTD
if self._position != Positions.NTD:
                # "sell" here is from the bank's side, i.e. the investor's buy
buy_price_col = self.choice_price_col(self._position, "賣出")
buy_price = buy_price_col[self._last_trade_tick]
                # "buy" here is from the bank's side, i.e. the investor's sell
sell_price_col = self.choice_price_col(self._position, "買入")
sell_price = sell_price_col[self._current_tick]
self._total_profit = (self._total_profit / buy_price) * sell_price
            # end of episode
if self._truncated:
if action != Actions.Buy_NTD.value:
buy_price_col = self.choice_price_col(Positions(action), "賣出")
buy_price = buy_price_col[self._last_trade_tick]
sell_price_col = self.choice_price_col(Positions(action), "買入")
sell_price = sell_price_col[self._current_tick]
self._total_profit = (self._total_profit / buy_price) * sell_price
def get_total_profit(self):
return self._total_profit
def _calculate_reward(self, action):
reward = 0
if self._position == Positions.NTD:
reward = 0
else:
price_col = self.choice_price_col(self._position)
current_price = price_col[self._current_tick]
last_day_price = price_col[self._current_tick-1]
reward = (current_price - last_day_price) / last_day_price
return reward * 100
        # reward = 0
        #
        # if action != self._position.value:
        #     # previously held currency was not NTD
        #     if self._position != Positions.NTD:
        #         # "sell" here is from the bank's side, i.e. the investor's buy
        #         buy_price_col = self.choice_price_col(self._position, "賣出")
        #         buy_price = buy_price_col[self._last_trade_tick]
        #
        #         # "buy" here is from the bank's side, i.e. the investor's sell
        #         sell_price_col = self.choice_price_col(self._position, "買入")
        #         sell_price = sell_price_col[self._current_tick]
        #         reward = (self._total_profit / buy_price) * sell_price - self._total_profit
        #
        #     # end of episode
        #     elif self._truncated:
        #         if action != Actions.Buy_NTD.value:
        #             buy_price_col = self.choice_price_col(Positions(action), "賣出")
        #             buy_price = buy_price_col[self._last_trade_tick]
        #
        #             sell_price_col = self.choice_price_col(Positions(action), "買入")
        #             sell_price = sell_price_col[self._current_tick]
        #
        #             reward = (self._total_profit / buy_price) * sell_price - self._total_profit
        #
        # return reward * 1000
def _process_data(self):
start = self.frame_bound[0] - self.window_size
end = self.frame_bound[1]
prices = self.df.iloc[start:end, :].filter(like='即期')
        # the set of features used can be changed here
signal_features = self.df.iloc[:, 1:].to_numpy()[start:end]
return prices, signal_features
def test(self):
frame_bounds = [(10, 100), (10, 300), (10, 800)]
mean_profit = 0
for frame_bound in frame_bounds:
cfg = deepcopy(self.config)
cfg['task']['env']['train_data'] = self.cfg['test_data']
cfg['task']['env']['frame_bound'] = frame_bound
env = ForexEnv(cfg)
env.obs, _ = env.reset()
env.compute_obs()
while True:
action = self.agent.model.get_action(env.obs, test=True)
obs, reward, done, info = env.step(action)
if done:
mean_profit += env.get_total_profit()
break
mean_profit /= len(frame_bounds)
Runner.logger.log({'test profit': mean_profit})
return mean_profit
def save(self):
return None
def load(self, x):
pass
| 15,508 | Python | 33.851685 | 112 | 0.559195 |
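For reference, the per-step reward implemented by `ForexEnv._calculate_reward` above is simply the scaled one-day relative change of the held currency's spot-buy rate, and zero while holding NTD (a restatement of the code, not new behavior):

```latex
r_t =
\begin{cases}
0 & \text{if holding NTD} \\
100 \cdot \dfrac{p_t - p_{t-1}}{p_{t-1}} & \text{if holding a foreign currency with rate } p
\end{cases}
```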
BeanSamuel/Exchange-Rate-Prediction-RL/run.py | import cProfile
from utils.hydra_cfg.hydra_utils import *
from utils.runner import Runner
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
def run(cfg):
Runner.init(cfg)
if cfg.profile:
cProfile.runctx("Runner.run()", globals(), locals(), "profile.pstat")
else:
Runner.run()
def runs(cfg):
#
# # policy gradient
# cfg.train.name = 'PGAgent'
#
# # reward
# cfg.train.params.config.tau = 0
# cfg.train.params.config.gamma = 0
# run(cfg)
#
# cfg.train.params.config.tau = 0.75
# cfg.train.params.config.gamma = 0.75
# run(cfg)
#
# # mlp size
# cfg.train.params.model.actor_mlp = [32, 32]
# cfg.train.params.model.critic_mlp = [32, 32]
# cfg.train.params.config.learning_rate = 1e-3
# cfg.train.params.config.minibatch_size = 512
# run(cfg)
#
# # batch size
# cfg.train.params.model.actor_mlp = [256, 256]
# cfg.train.params.model.critic_mlp = [256, 256]
# cfg.train.params.config.learning_rate = 1e-3
# cfg.train.params.config.minibatch_size = 64
# run(cfg)
#
# # lr
# cfg.train.params.model.actor_mlp = [256, 256]
# cfg.train.params.model.critic_mlp = [256, 256]
# cfg.train.params.config.learning_rate = 1e-2
# cfg.train.params.config.minibatch_size = 512
# run(cfg)
# ppo
cfg.train.name = 'PPOAgent'
cfg.train.params.model.actor_mlp = [256, 256]
cfg.train.params.model.critic_mlp = [256, 256]
cfg.train.params.config.learning_rate = 1e-3
cfg.train.params.config.minibatch_size = 512
run(cfg)
@hydra.main(config_name="config", config_path="./cfg")
def parse_hydra_configs(cfg: DictConfig):
if cfg.debug:
cfg.wandb = cfg.debug == "wandb"
cfg.save = cfg.debug == "save"
cfg.task.env.num_envs = 1
runs(cfg)
elif cfg.test:
cfg.wandb = False
cfg.save = False
cfg.task.env.num_envs = 1
cfg.train.params.config.minibatch_size = 1
runs(cfg)
else:
runs(cfg)
Runner.close()
if __name__ == "__main__":
parse_hydra_configs()
| 2,138 | Python | 23.872093 | 77 | 0.606642 |

BeanSamuel/Exchange-Rate-Prediction-RL/README.md | # 2023 Artificial Intelligence Hw2 -- Multi-Currency Exchange Rate Prediction (RL)
## Task
Use reinforcement learning (RL) to make foreign-exchange investment decisions.
## Environment Rules
- There are 9 currencies in total: 8 foreign currencies plus NTD (New Taiwan Dollar).
- At any time step 𝑖 only one currency can be held.
- Initial capital = 1 NTD.
- **Observation**: exchange-rate data from day 𝑖 − 10 to day 𝑖 (𝑖 ≥ 10).
- **Action**: buy one of the 8 foreign currencies or buy NTD, 9 actions in total.
- When buying a new currency, the currently held currency is sold first and the new one is then bought (both legs at cash (現鈔) rates).
- Buying a currency is settled at the "現鈔賣出" (bank cash-selling) rate and selling at the "現鈔買入" (bank cash-buying) rate; the spread between the two is effectively the bank's fee (both rates are quoted from the bank's side).
- If the currency to buy is the same as the one currently held, nothing happens.
- **Position**: the currency currently held, 9 possibilities.
- **Total Reward**: 0 while holding NTD; while holding a foreign currency, the daily rise/fall of that currency's rate between day 𝑖 and day 𝑖 − 1, accumulated over all days.
- **Total Profit**: capital growth ratio = final capital (see the sketch below).
## Setup
Python version: 3.10.9
```cmd!
pip install -r requirements.txt
```
## Model Training
Run the following command in train.ipynb:
```cmd!
!python run.py
```
## Model Inference
Run the following command in train.ipynb:
```cmd!
!python run.py test=True test_data='./test.csv'
```
| 757 | Markdown | 19.486486 | 74 | 0.690885 |
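The profit rule above can be made concrete with a minimal sketch (not part of the repository; the rates are made up for illustration): buying a foreign currency converts the running capital at the bank's selling rate, and converting back to NTD uses the bank's buying rate, so the spread alone loses money unless the rate moves enough.

```python
# Hypothetical round-trip profit update, mirroring the bookkeeping in ForexEnv._update_profit.
def round_trip(profit: float, bank_sell_rate: float, bank_buy_rate: float) -> float:
    foreign_amount = profit / bank_sell_rate   # investor buys at the bank's selling rate
    return foreign_amount * bank_buy_rate      # investor sells back at the bank's buying rate

profit = 1.0                              # initial capital: 1 NTD
profit = round_trip(profit, 31.5, 31.2)   # e.g. buy USD at 31.5, later sell at 31.2
print(round(profit, 4))                   # 0.9905 -> the spread must be overcome by rate moves
```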
BeanSamuel/Exchange-Rate-Prediction-RL/learning/actor_critic_model.py | from copy import deepcopy
import torch
from torch import nn
from torch.distributions import Categorical
from .utils import neg_log_p, eval_no_grad, Identity, RunningMeanStd
class Mlp(nn.Module):
def __init__(
self,
in_size, hidden_size, out_size=None,
activation: nn.Module = nn.ReLU(),
output_activation: nn.Module = nn.Identity()
):
super().__init__()
model = []
self.sizes = sizes = [in_size] + hidden_size
for x, y in zip(sizes[:-1], sizes[1:]):
model.append(nn.Linear(x, y))
model.append(deepcopy(activation))
if out_size is not None:
model.append(nn.Linear(sizes[-1], out_size))
self.model = nn.Sequential(*model)
self.out_act = output_activation
def forward(self, x):
return self.out_act(self.model(x))
def set_spectral_norm(self):
for i, layer in enumerate(self.model):
if isinstance(layer, nn.Linear):
self.model[i] = nn.utils.spectral_norm(layer)
class ActorCriticModel(nn.Module):
def __init__(self, config):
super().__init__()
self.obs_size = config['num_obs']
self.action_size = config['num_actions']
self.value_size = config['num_values']
self.actor = self.Actor(self.obs_size, config['actor_mlp'], self.action_size)
self.critic = self.Critic(self.obs_size, config['critic_mlp'], self.value_size)
normalize = lambda x: (x - x.mean()) / (x.std() + 1e-8)
self.normalize_obs = RunningMeanStd(self.obs_size) if config['normalize_obs'] else Identity()
self.normalize_value = RunningMeanStd(self.value_size) if config['normalize_value'] else Identity()
self.normalize_advantage = normalize if config['normalize_advantage'] else Identity()
self.preproc_advantage = lambda x: self.normalize_advantage(x.mean(dim=-1))
class Actor(nn.Module):
def __init__(self, obs_size, mlp_size, action_size):
super().__init__()
self.mu = Mlp(obs_size, mlp_size, 9, output_activation=nn.Softmax())
def forward(self, x):
return self.mu(x)
class Critic(nn.Module):
def __init__(self, obs_size, mlp_size, value_size):
super().__init__()
self.value = Mlp(obs_size, mlp_size, value_size)
def forward(self, x):
return self.value(x)
@eval_no_grad
def get_action(self, obs, train=False, test=False):
obs = self.normalize_obs(obs)
mu = self.actor(obs)
if train:
return mu
elif test:
return torch.argmax(mu, dim=-1)
else:
action_dist = Categorical(mu)
action = action_dist.sample()
return action, -action_dist.log_prob(action)
@eval_no_grad
def get_value(self, obs, train=False):
obs = self.normalize_obs(obs)
value = self.critic(obs)
if train:
return value
else:
return self.normalize_value(value, unnorm=True)
| 3,072 | Python | 33.144444 | 107 | 0.58724 |
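As a quick orientation to the model above, the following minimal sketch builds it standalone and queries the actor and critic; it assumes the repository's `learning` and `utils` packages are importable, and the sizes are illustrative rather than taken from the environment:

```python
import torch
from learning.actor_critic_model import ActorCriticModel  # assumed import path

config = {
    'num_obs': 99, 'num_actions': 1, 'num_values': 1,        # illustrative sizes
    'actor_mlp': [256, 256], 'critic_mlp': [256, 256],
    'normalize_obs': True, 'normalize_value': False, 'normalize_advantage': True,
}
model = ActorCriticModel(config)
model.eval()                               # eval_no_grad disables gradients outside training
obs = torch.zeros(1, config['num_obs'])
action, neglogp = model.get_action(obs)    # sampled action index and its negative log-prob
value = model.get_value(obs)               # critic estimate (un-normalized when configured)
print(action.shape, neglogp.shape, value.shape)
```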
BeanSamuel/Exchange-Rate-Prediction-RL/learning/replay_buffer.py | import torch
class ReplayBuffer():
def __init__(self, buffer_size, device):
self._head = 0
self._total_count = 0
self._buffer_size = buffer_size
self._device = device
self._data_buf = None
self._sample_idx = torch.randperm(buffer_size)
self._sample_head = 0
return
def reset(self):
self._head = 0
self._total_count = 0
self._reset_sample_idx()
return
def get_buffer_size(self):
return self._buffer_size
def get_total_count(self):
return self._total_count
def store(self, data_dict):
if (self._data_buf is None):
self._init_data_buf(data_dict)
n = next(iter(data_dict.values())).shape[0]
buffer_size = self.get_buffer_size()
assert(n < buffer_size)
for key, curr_buf in self._data_buf.items():
curr_n = data_dict[key].shape[0]
assert(n == curr_n)
store_n = min(curr_n, buffer_size - self._head)
curr_buf[self._head:(self._head + store_n)] = data_dict[key][:store_n]
remainder = n - store_n
if (remainder > 0):
curr_buf[0:remainder] = data_dict[key][store_n:]
self._head = (self._head + n) % buffer_size
self._total_count += n
return
def sample(self, n):
total_count = self.get_total_count()
buffer_size = self.get_buffer_size()
if self.is_empty():
return None
idx = torch.arange(self._sample_head, self._sample_head + n)
idx = idx % buffer_size
rand_idx = self._sample_idx[idx]
if (total_count < buffer_size):
rand_idx = rand_idx % self._head
samples = dict()
for k, v in self._data_buf.items():
samples[k] = v[rand_idx]
self._sample_head += n
if (self._sample_head >= buffer_size):
self._reset_sample_idx()
return samples
def _reset_sample_idx(self):
buffer_size = self.get_buffer_size()
self._sample_idx[:] = torch.randperm(buffer_size)
self._sample_head = 0
return
def _init_data_buf(self, data_dict):
buffer_size = self.get_buffer_size()
self._data_buf = dict()
for k, v in data_dict.items():
v_shape = v.shape[1:]
self._data_buf[k] = torch.zeros((buffer_size,) + v_shape, device=self._device)
return
def is_empty(self):
return self._total_count == 0
class ReplayBufferCPU(ReplayBuffer):
def __init__(self, buffer_size, device):
self.sample_device = device
super().__init__(buffer_size, device='cpu')
def sample(self, n):
x = super().sample(n)
if x is not None:
for k in x.keys():
x[k] = x[k].to(self.sample_device)
return x
| 2,897 | Python | 26.339622 | 90 | 0.534001 |
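A minimal usage sketch of the replay buffer above (illustrative shapes; the import path is assumed):

```python
import torch
from learning.replay_buffer import ReplayBuffer  # assumed import path

buf = ReplayBuffer(buffer_size=8, device='cpu')
batch = {'obs': torch.randn(4, 3), 'reward': torch.randn(4, 1)}
buf.store(batch)               # lazily allocates one (8, ...) tensor per key
buf.store(batch)               # write head wraps around the circular buffer
sample = buf.sample(4)         # draws indices from a shuffled pass over the buffer
print(sample['obs'].shape)     # torch.Size([4, 3])
```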
BeanSamuel/Exchange-Rate-Prediction-RL/learning/pg_agent.py | import torch
from .ppo_agent import PPOAgent
torch.autograd.set_detect_anomaly(True)
class PGAgent(PPOAgent):
def _actor_loss(self, _, neglogp, reward):
return (neglogp * reward).sum()
def _critic_loss(self, old_value, value, return_batch):
return 0
| 278 | Python | 20.461537 | 59 | 0.679856 |
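For reference, `PGAgent` swaps PPO's clipped surrogate for a plain REINFORCE-style objective and turns the value loss off; in the notation of the update it overrides (the third argument, named `reward` here, is the advantage estimate passed by `PPOAgent.update`), this amounts to:

```latex
L_{\text{actor}} = \sum_t \bigl(-\log \pi_\theta(a_t \mid s_t)\bigr)\,\hat{A}_t,
\qquad L_{\text{critic}} = 0
```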
BeanSamuel/Exchange-Rate-Prediction-RL/learning/experience.py | import gym
import torch
import numpy as np
class ExperienceBuffer:
def __init__(self, shape, env_info, device):
self.shape = tuple(shape)
self.num_obs = env_info['num_obs']
self.num_actions = env_info['num_actions']
self.num_values = env_info['num_values']
self.device = device
self.datas = {}
self.create_buffer()
def create_buffer(self):
self.add_buffer('obs', self.num_obs)
self.add_buffer('reward', self.num_values)
self.add_buffer('return', self.num_values)
self.add_buffer('value', self.num_values)
self.add_buffer('action', self.num_actions)
self.add_buffer('neglogp')
self.add_buffer('done', dtype=torch.long)
self.add_buffer('next_obs', self.num_obs)
self.add_buffer('next_value', self.num_values)
# def create_buffer(self):
# self.datas['obs'] = torch.zeros([*self.shape, self.num_obs], device=self.device)
# self.datas['reward'] = torch.zeros([*self.shape, self.num_values], device=self.device)
# self.datas['return'] = torch.zeros([*self.shape, self.num_values], device=self.device)
# self.datas['value'] = torch.zeros([*self.shape, self.num_values], device=self.device)
# self.datas['action'] = torch.zeros([*self.shape, self.num_actions], device=self.device)
# self.datas['neglogp'] = torch.zeros([*self.shape], device=self.device)
# self.datas['done'] = torch.zeros([*self.shape], dtype=torch.long, device=self.device)
# self.datas['next_obs'] = torch.zeros([*self.shape, self.num_obs], device=self.device)
# self.datas['next_value'] = torch.zeros([*self.shape, self.num_values], device=self.device)
def add_buffer(self, name, shape=(), dtype=torch.float):
shape = (shape,) if isinstance(shape, int) else tuple(shape)
self.datas[name] = torch.zeros(self.shape + shape, dtype=dtype, device=self.device)
def update_data(self, *args, **kwargs):
raise NotImplementedError
def get_data(self, *args, **kwargs):
raise NotImplementedError
class VecEnvExperienceBuffer(ExperienceBuffer):
def update_data(self, key, idx, value):
self.datas[key][idx] = value
def get_data(self):
batch_dict = {}
for k, v in self.datas.items():
s = v.shape
batch_dict[k] = v.transpose(0, 1).reshape(s[0] * s[1], *s[2:])
return batch_dict
class AsyncExperienceBuffer(ExperienceBuffer):
def __init__(self, num_actors, env_info, max_size, device):
super().__init__([max_size * 2], env_info, device)
self.size = max_size
self.run_idx = torch.zeros([num_actors], dtype=torch.long, device=self.device)
def create_buffer(self):
super().create_buffer()
self.status = torch.zeros(self.shape, dtype=torch.long, device=self.device)
self.datas['steps'] = torch.zeros([*self.shape], dtype=torch.long, device=self.device)
def update_data(self, **kwargs):
raise NotImplementedError
def pre_update_data(self, env_ids, datas: dict):
idx = (self.status == 0).nonzero().squeeze(-1)[:len(env_ids)]
self.run_idx[env_ids] = idx
for k, v in datas.items():
self.datas[k][idx] = v
self.status[idx] = -1
def post_update_data(self, env_ids, datas: dict):
idx = self.run_idx[env_ids]
for k, v in datas.items():
self.datas[k][idx] = v
self.status[self.status > 0] += 1
self.status[idx] = 1
# ToDo: check is needed
self.status[idx[datas['steps'] <= 0]] = 0
def full(self):
return torch.sum(self.status > 0) >= self.size
def get_data(self):
if not self.full():
raise
idx = self.status.topk(self.size, sorted=False)[1]
data = {k: v[idx] for k, v in self.datas.items()}
self.status[idx] = 0
return data
if __name__ == '__main__':
T = torch.Tensor
TL = lambda x: T(x).to(dtype=torch.long)
Z = torch.zeros
R = torch.rand
env_info = {'action_space': Z(2), 'observation_space': Z(3), 'value_size': 1}
buf = AsyncExperienceBuffer(5, env_info, 5, 'cpu')
buf.pre_update_data(TL([1, 3]), {'obs': T([[1, 1, 1], [2, 2, 2]])})
buf.pre_update_data(TL([2, 0]), {'obs': T([[3, 3, 3], [4, 4, 4]])})
buf.post_update_data(TL([2, 0]), {'action': T([[3, 3], [4, 4]])})
buf.post_update_data(TL([1, 3]), {'action': T([[1, 1], [2, 2]])})
buf.pre_update_data(TL([2, 0]), {'obs': T([[3, 3, 3], [4, 4, 4]])})
buf.post_update_data(TL([2, 0]), {'action': T([[3, 3], [4, 4]])})
print(buf.run_idx)
print(buf.datas['obs'], buf.datas['action'])
print(buf.status)
print(buf.get_data())
print(buf.status)
| 4,782 | Python | 38.204918 | 100 | 0.587411 |
BeanSamuel/Exchange-Rate-Prediction-RL/learning/utils.py | import numpy as np
import torch
from torch import nn
from utils.torch_utils import to_torch_size
def eval_no_grad(func):
def _eval_no_grad(self, *args, **kwargs):
if not self.training:
with torch.no_grad():
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return _eval_no_grad
class Identity(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, x, **kwargs):
return x
def neg_log_p(x, mean, log_std):
return 0.5 * (((x - mean) / torch.exp(log_std)) ** 2).sum(dim=-1) \
+ 0.5 * np.log(2.0 * np.pi) * x.size()[-1] \
+ log_std.sum(dim=-1)
class RunningMeanStd(nn.Module):
def __init__(self, in_size, eps=1e-05):
super().__init__()
self.in_size = to_torch_size(in_size)
self.eps = eps
self.register_buffer("mean", torch.zeros(in_size, dtype=torch.float64))
self.register_buffer("var", torch.ones(in_size, dtype=torch.float64))
self.register_buffer("count", torch.ones((), dtype=torch.float64))
def _update(self, batch_mean, batch_var, batch_count):
delta = batch_mean - self.mean
m_a = self.var * self.count
m_b = batch_var * batch_count
m2 = m_a + m_b + delta**2 * self.count * batch_count / (self.count + batch_count)
self.count += batch_count
self.mean[:] = self.mean + delta * batch_count / self.count
self.var[:] = m2 / self.count
def forward(self, x, unnorm=False):
if x.nelement() == 0:
return x
if self.training and not unnorm:
axis = list(range(x.ndim - len(self.in_size)))
mean = x.mean(axis)
var = x.var(axis, correction=0)
count = x.shape[:-1].numel()
self._update(mean, var, count)
if unnorm:
y = torch.clamp(x, min=-5.0, max=5.0)
y = torch.sqrt(self.var.float() + self.eps) * y + self.mean.float()
else:
y = (x - self.mean.float()) / torch.sqrt(self.var.float() + self.eps)
y = torch.clamp(y, min=-5.0, max=5.0)
return y
| 2,193 | Python | 29.472222 | 89 | 0.545372 |
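A minimal sketch of how the `RunningMeanStd` module above behaves (assumes the repository's `utils.torch_utils.to_torch_size` helper is available, as the import at the top requires; shapes are illustrative):

```python
import torch
from learning.utils import RunningMeanStd  # assumed import path

rms = RunningMeanStd(in_size=3)
rms.train()                       # running statistics only update in training mode
x = torch.randn(1000, 3) * 5 + 2
y = rms(x)                        # normalize (clamped to [-5, 5]) and update the statistics
x_back = rms(y, unnorm=True)      # approximate inverse using the same running statistics
print(rms.mean, rms.var)          # close to the batch mean / variance after one update
```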
BeanSamuel/Exchange-Rate-Prediction-RL/learning/dataset.py | import torch
class Dataset:
def __init__(self, batch_size, minibatch_size, device):
self.batch_size = batch_size
self.minibatch_size = minibatch_size
self.device = device
# self.size = self.batch_size // self.minibatch_size
self._idx_buf = torch.randperm(batch_size)
def update(self, datas):
self.datas = datas
def __len__(self):
return self.batch_size // self.minibatch_size
def __getitem__(self, idx):
start = idx * self.minibatch_size
end = (idx + 1) * self.minibatch_size
sample_idx = self._idx_buf[start:end]
data_dict = {}
for k, v in self.datas.items():
if v is not None:
data_dict[k] = v[sample_idx].detach()
if end >= self.batch_size:
self._shuffle_idx_buf()
return data_dict
def _shuffle_idx_buf(self):
self._idx_buf[:] = torch.randperm(self.batch_size)
| 969 | Python | 26.714285 | 60 | 0.55934 |
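A minimal usage sketch of the `Dataset` helper above (illustrative shapes; the import path is assumed):

```python
import torch
from learning.dataset import Dataset  # assumed import path

ds = Dataset(batch_size=8, minibatch_size=4, device='cpu')
ds.update({'obs': torch.randn(8, 3), 'advantage': torch.randn(8)})
for i in range(len(ds)):      # 8 // 4 = 2 minibatches per pass
    mb = ds[i]                # detached slices picked through a shuffled index buffer
    print(mb['obs'].shape)    # torch.Size([4, 3])
```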
BeanSamuel/Exchange-Rate-Prediction-RL/learning/ppo_agent.py | import os
import shutil
import time
import torch
from torch import optim
from torch.distributions import Categorical
from .utils import neg_log_p
from .dataset import Dataset
from .experience import VecEnvExperienceBuffer
from .actor_critic_model import ActorCriticModel
from utils.runner import Runner
torch.autograd.set_detect_anomaly(True)
class PPOAgent:
def __init__(self, params, env):
print(f'\n------------------------------------ {self.__class__.__name__} ------------------------------------')
self.config = config = params['config']
self.device = config.get('device', 'cuda:0')
# save
self.save_freq = config.get('save_frequency', 0)
# normalize
self.normalize_obs = self.config['normalize_obs']
self.normalize_value = self.config.get('normalize_value', False)
self.normalize_advantage = config['normalize_advantage']
# learning
self.lr = config['learning_rate']
self.num_actors = env.num_envs
self.horizon_length = config['horizon_length']
self.seq_len = self.config.get('seq_length', 4)
self.max_epochs = self.config.get('max_epochs', -1)
self.mini_epochs_num = self.config['mini_epochs']
self.minibatch_size = self.config.get('minibatch_size')
self.batch_size = self.horizon_length * self.num_actors
assert (self.batch_size % self.minibatch_size == 0)
self.e_clip = config['e_clip']
self.clip_action = self.config.get('clip_actions', True)
self.clip_value = config['clip_value']
self.tau = self.config['tau']
self.gamma = self.config['gamma']
self.critic_loss_coef = config['critic_loss_coef']
self.bounds_loss_coef = self.config.get('bounds_loss_coef', None)
# env
self.env = env
self.build_env_info()
# model
self.build_model(params['model'])
self.optimizer = optim.AdamW(self.model.parameters(), self.lr, eps=1e-08, weight_decay=0)
# buffers
self.dataset = Dataset(self.batch_size, self.minibatch_size, self.device)
self.experience_buffer = VecEnvExperienceBuffer([self.horizon_length, self.num_actors], self.env_info, self.device)
# counter
self.epoch_num = 0
self.env.agent = self
def build_env_info(self):
self.env_info = dict(
num_obs=self.env.num_obs,
num_actions=self.env.num_actions,
num_values=self.env.num_values,
)
def build_model(self, config):
model = config.get('model', ActorCriticModel)
config['normalize_obs'] = self.normalize_obs
config['normalize_value'] = self.normalize_value
config['normalize_advantage'] = self.normalize_advantage
config.update(self.env_info)
self.model = model(config).to(self.device)
print(self.model)
def set_eval(self):
self.model.eval()
def set_train(self):
self.model.train()
def preproc_action(self, action):
return action.clone()
def env_step(self, action):
_action = self.preproc_action(action)
obs, reward, done, infos = self.env.step(_action)
obs = obs.to(self.device)
reward = reward.to(self.device)
done = done.to(self.device)
for k in infos.keys():
if isinstance(infos[k], torch.Tensor):
infos[k] = infos[k].to(self.device)
return obs, reward, done, infos
def env_reset_done(self):
obs = self.env.reset_done()
return obs.to(self.device)
def play_steps(self):
for n in range(self.horizon_length):
obs = self.env_reset_done()
self.experience_buffer.update_data('obs', n, obs)
value = self.model.get_value(obs)
action, neglogp = self.model.get_action(obs)
obs, reward, done, infos = self.env_step(action)
next_value = self.model.get_value(obs)
self.experience_buffer.update_data('value', n, value)
self.experience_buffer.update_data('action', n, action)
self.experience_buffer.update_data('neglogp', n, neglogp)
self.experience_buffer.update_data('reward', n, reward)
self.experience_buffer.update_data('next_obs', n, obs)
self.experience_buffer.update_data('done', n, done)
self.experience_buffer.update_data('next_value', n, next_value)
self.post_step(n, infos)
mb_done = self.experience_buffer.datas['done']
mb_value = self.experience_buffer.datas['value']
mb_next_value = self.experience_buffer.datas['next_value']
mb_reward = self.experience_buffer.datas['reward']
mb_value, mb_return, mb_adv = self.compute_return(mb_done, mb_value, mb_reward, mb_next_value)
self.experience_buffer.datas['value'] = mb_value
self.experience_buffer.datas['return'] = mb_return
self.experience_buffer.datas['advantage'] = mb_adv
batch_dict = self.experience_buffer.get_data()
return batch_dict
def train_epoch(self):
self.set_eval()
play_time_start = time.time()
batch_dict = self.play_steps()
play_time_end = time.time()
update_time_start = time.time()
self.set_train()
self.curr_frames = self.batch_size
self.dataset.update(batch_dict)
for mini_ep in range(0, self.mini_epochs_num):
for i in range(len(self.dataset)):
self.update(self.dataset[i])
self.post_epoch()
update_time_end = time.time()
play_time = play_time_end - play_time_start
update_time = update_time_end - update_time_start
total_time = update_time_end - play_time_start
return play_time, update_time, total_time
def train(self):
self.last_mean_rewards = -100500
total_time = 0
self.frame = 0
while True:
self.epoch_num += 1
play_time, update_time, epoch_time = self.train_epoch()
total_time += epoch_time
scaled_time = epoch_time
scaled_play_time = play_time
curr_frames = self.curr_frames
self.frame += curr_frames
fps_step = curr_frames / scaled_play_time
fps_total = curr_frames / scaled_time
print(f'fps step: {fps_step:.1f} fps total: {fps_total:.1f}')
if self.save_freq > 0:
if self.epoch_num % self.save_freq == 0:
Runner.save_model('Epoch' + str(self.epoch_num))
if self.epoch_num > self.max_epochs:
print('MAX EPOCHS NUM!')
return
def test(self):
self.set_eval()
score = self.env.test()
print('total profit:', score)
def post_step(self, n, infos):
pass
def post_epoch(self):
Runner.logger.upload()
if self.epoch_num % 10 == 0:
self.env.test()
def compute_return(self, done, value, reward, next_value):
last_gae_lam = 0
adv = torch.zeros_like(reward)
done = done.float()
for t in reversed(range(self.horizon_length)):
not_done = 1.0 - done[t]
not_done = not_done.unsqueeze(1)
delta = reward[t] + self.gamma * next_value[t] - value[t]
last_gae_lam = delta + self.gamma * self.tau * not_done * last_gae_lam
adv[t] = last_gae_lam
returns = self.model.normalize_value(value + adv)
value = self.model.normalize_value(value)
adv = self.model.preproc_advantage(adv)
return value, returns, adv
def update(self, input_dict):
obs = input_dict['obs']
action = input_dict['action']
old_value = input_dict['value']
old_neglogp = input_dict['neglogp']
advantage = input_dict['advantage']
returns = input_dict['return']
mu = self.model.get_action(obs, train=True)
neglogp = -Categorical(mu).log_prob(action.squeeze(-1))
value = self.model.get_value(obs, train=True)
# print(mu.shape, action.shape)
# print(neglogp.shape)
# print(torch.exp(old_neglogp[0] - neglogp[0]))
a_loss = self._actor_loss(old_neglogp, neglogp, advantage)
c_loss = self._critic_loss(old_value, value, returns)
b_loss = self._bound_loss(mu)
loss = a_loss + self.critic_loss_coef * c_loss + self.bounds_loss_coef * b_loss
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
Runner.logger.log({
'loss/total': loss,
'loss/actor': a_loss,
'loss/critic': c_loss,
'value/': value,
})
def log_results(self, **kwargs):
pass
def _actor_loss(self, old_neglogp, neglogp, advantage):
ratio = torch.exp(old_neglogp - neglogp).clamp_max(2) # prevent too large loss
surr1 = advantage * ratio
surr2 = advantage * torch.clamp(ratio, 1.0 - self.e_clip, 1.0 + self.e_clip)
a_loss = torch.max(-surr1, -surr2)
return a_loss.mean()
def _critic_loss(self, old_value, value, return_batch):
if self.clip_value:
value_pred_clipped = old_value + (value - old_value).clamp(-self.e_clip, self.e_clip)
value_losses = (value - return_batch) ** 2
value_losses_clipped = (value_pred_clipped - return_batch)**2
c_loss = torch.max(value_losses, value_losses_clipped)
else:
c_loss = (return_batch - value) ** 2
return c_loss.mean()
def _bound_loss(self, mu):
if self.bounds_loss_coef is not None:
soft_bound = 1.0
mu_loss_high = torch.maximum(mu - soft_bound, torch.tensor(0, device=self.device)) ** 2
mu_loss_low = torch.minimum(mu + soft_bound, torch.tensor(0, device=self.device)) ** 2
b_loss = (mu_loss_low + mu_loss_high).sum(axis=-1)
else:
b_loss = 0
return b_loss.mean()
def save(self):
return self.model.state_dict()
def load(self, datas):
self.model.load_state_dict(datas)
| 10,238 | Python | 33.708474 | 123 | 0.582926 |
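The `compute_return` method above is the usual GAE recursion with `tau` playing the role of λ; written out to match the code (note that the bootstrap term `next_value` is not masked by `done` in this implementation):

```latex
\delta_t = r_t + \gamma V(s_{t+1}) - V(s_t), \qquad
\hat{A}_t = \delta_t + \gamma\,\tau\,(1 - d_t)\,\hat{A}_{t+1}, \qquad
R_t = V(s_t) + \hat{A}_t
```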
BeanSamuel/Exchange-Rate-Prediction-RL/cfg/config.yaml | experiment: ''
num_envs: ''
seed: 42
torch_deterministic: False
rl_device: 'cpu'
# set the maximum number of learning iterations to train for. overrides default per-environment setting
max_iterations: ''
# RLGames Arguments
# test - if set, run policy in inference mode (requires setting checkpoint to load)
test: False
# used to set checkpoint path
checkpoint: ''
# disables rendering
headless: False
# enables native livestream
enable_livestream: False
# timeout for MT script
mt_timeout: 30
# set default task and default training config based on task
defaults:
- task: Noob
- train: ${task}PPO
- hydra/job_logging: disabled
# set the directory where the output files get saved
hydra:
output_subdir: null
run:
dir: .
render: False
debug: False
wandb: True
save: True
profile: False
test_data: ''
| 821 | YAML | 18.116279 | 103 | 0.74056 |
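Because `run.py` above is a Hydra entry point, any of these keys (and the nested `task.*` / `train.*` keys) can be overridden from the command line; an illustrative example:

```cmd!
python run.py max_iterations=300 wandb=False rl_device=cpu task.env.window_size=10
```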
BeanSamuel/Exchange-Rate-Prediction-RL/cfg/default_config.yaml |
# Task name - used to pick the class to load
task_name: HumanoidMGC
# experiment name. defaults to name of training config
experiment: ''
# if set to positive integer, overrides the default number of environments
num_envs:
# seed - set to -1 to choose random seed
seed: 42
# set to True for deterministic performance
torch_deterministic: False
# set the maximum number of learning iterations to train for. overrides default per-environment setting
max_iterations: ''
## Device config
physics_engine: 'physx'
# whether to use cpu or gpu pipeline
pipeline: 'gpu'
# whether to use cpu or gpu physx
sim_device: 'gpu'
# used for gpu simulation only - device id for running sim and task if pipeline=gpu
device_id: 0
# device to run RL
rl_device: 'cuda:0'
# multi-GPU training
multi_gpu: False
## PhysX arguments
num_threads: 4 # Number of worker threads per scene used by PhysX - for CPU PhysX only.
solver_type: 0 # 0: pgs, 1: tgs
# RLGames Arguments
# test - if set, run policy in inference mode (requires setting checkpoint to load)
test: False
# used to set checkpoint path
checkpoint: ''
# disables rendering
headless: False
# enables native livestream
enable_livestream: False
# timeout for MT script
mt_timeout: 30
# set default task and default training config based on task
defaults:
- task: HumanoidMGC
- train: ${task}PPO
- hydra/job_logging: disabled
# set the directory where the output files get saved
hydra:
output_subdir: null
run:
dir: .
wandb: False
save: False
| 1,498 | YAML | 23.57377 | 103 | 0.746996 |
BeanSamuel/Exchange-Rate-Prediction-RL/cfg/task/Noob.yaml | # used to create the object
name: Noob
# if given, will override the device setting in gym.
env:
num_envs: ${resolve_default:1,${...num_envs}}
train_data: './train.csv'
test_data: ${resolve_default:'./test.csv',${...test_data}}
window_size: 10
# frame_bound: [100, 1000]
frame_bound: [1850, 2850]
# frame_bound: [10, 800]
#
| 337 | YAML | 23.142855 | 60 | 0.643917 |
BeanSamuel/Exchange-Rate-Prediction-RL/cfg/train/NoobPPO.yaml | name: PPOAgent
params:
seed: ${...seed}
model:
actor_mlp: [256, 256]
critic_mlp: [256, 256]
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
device: ${....rl_device}
save_frequency: 10
normalize_obs: True
normalize_value: False
normalize_advantage: True
horizon_length: 2048
max_epochs: ${resolve_default:200,${....max_iterations}}
mini_epochs: 6
minibatch_size: 512
tau: 0.9
gamma: 0.9
e_clip: 0.2
clip_value: False
learning_rate: 1e-3
critic_loss_coef: 1
bounds_loss_coef: 10
grad_penalty_coef: 0
| 712 | YAML | 19.970588 | 101 | 0.627809 |
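With the values above and the default `num_envs: 1` from Noob.yaml, each update collects `horizon_length × num_envs = 2048 × 1 = 2048` transitions, which are split into `2048 / 512 = 4` minibatches and revisited for `mini_epochs: 6` passes; `PPOAgent.__init__` asserts this divisibility.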
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/env.py | from copy import deepcopy
from time import time
from enum import Enum
import numpy as np
import matplotlib.pyplot as plt
import gymnasium as gym
import pandas as pd
import torch
from torch.distributions import Categorical
from utils.runner import Runner
class Actions(Enum):
Buy_NTD = 0
Buy_AUD = 1
Buy_CAD = 2
Buy_EUR = 3
Buy_GBP = 4
Buy_HKD = 5
Buy_JPY = 6
Buy_SGD = 7
Buy_USD = 8
class Positions(Enum):
    # the currency currently held
NTD = 0
AUD = 1
CAD = 2
EUR = 3
GBP = 4
HKD = 5
JPY = 6
SGD = 7
USD = 8
def opposite(self, action):
return Positions(action)
class TradingEnv(gym.Env):
metadata = {'render_modes': ['human'], 'render_fps': 3}
def __init__(self, df, window_size, render_mode=None):
assert df.ndim == 2
assert render_mode is None or render_mode in self.metadata['render_modes']
self.render_mode = render_mode
self.df = df
self.window_size = window_size
self.prices, self.signal_features = self._process_data()
self.shape = (window_size, self.signal_features.shape[1])
# spaces
self.action_space = gym.spaces.Discrete(len(Actions))
INF = 1e10
self.observation_space = gym.spaces.Box(
low=-INF, high=INF, shape=self.shape, dtype=np.float32,
)
# episode
self._start_tick = self.window_size
self._end_tick = len(self.prices) - 1
self._truncated = None
self._current_tick = None
self._last_trade_tick = None
self._position = None
self._position_history = None
self._last_position = None
self._action = None
self._total_reward = None
self._total_profit = None
self._first_rendering = None
self.history = None
def reset(self, seed=None, options=None):
super().reset(seed=seed, options=options)
self.action_space.seed(int((self.np_random.uniform(0, seed if seed is not None else 1))))
self._truncated = False
self._current_tick = self._start_tick
self._last_trade_tick = self._current_tick - 1
self._position = Positions.NTD
self._position_history = (self.window_size * [None]) + [self._position]
self._action = 0
self._total_reward = 0.
self._total_profit = 1. # unit
self._first_rendering = True
self.history = {}
observation = self._get_observation()
info = self._get_info()
if self.render_mode == 'human':
self._render_frame()
return observation, info
def step(self, action):
# print(action)
self._action = action
self._truncated = False
self._current_tick += 1
if self._current_tick == self._end_tick:
self._truncated = True
step_reward = self._calculate_reward(action)
self._total_reward += step_reward
self._update_profit(action)
trade = False
if action != self._position.value:
trade = True
if trade:
self._last_position = self._position
self._position = self._position.opposite(action)
self._last_trade_tick = self._current_tick
self._position_history.append(self._position)
observation = self._get_observation()
info = self._get_info()
self._update_history(info)
if self.render_mode == 'human':
self._render_frame()
return observation, step_reward, self._truncated, info
def _get_info(self):
return dict(
total_reward=self._total_reward,
total_profit=self._total_profit,
position=self._position
)
def _get_observation(self):
return self.signal_features[self._current_tick - self.window_size:self._current_tick]
def _update_history(self, info):
if not self.history:
self.history = {key: [] for key in info.keys()}
for key, value in info.items():
self.history[key].append(value)
def _render_frame(self):
self.render()
def choice_price_col(self, position, buy_or_sell="買入"):
foreign_price = None
if position == Positions.AUD:
foreign_price = self.prices[f'AUD即期{buy_or_sell}'].to_numpy()
elif position == Positions.CAD:
foreign_price = self.prices[f'CAD即期{buy_or_sell}'].to_numpy()
elif position == Positions.EUR:
foreign_price = self.prices[f'EUR即期{buy_or_sell}'].to_numpy()
elif position == Positions.GBP:
foreign_price = self.prices[f'GBP即期{buy_or_sell}'].to_numpy()
elif position == Positions.HKD:
foreign_price = self.prices[f'HKD即期{buy_or_sell}'].to_numpy()
elif position == Positions.JPY:
foreign_price = self.prices[f'JPY即期{buy_or_sell}'].to_numpy()
elif position == Positions.SGD:
foreign_price = self.prices[f'SGD即期{buy_or_sell}'].to_numpy()
elif position == Positions.USD:
foreign_price = self.prices[f'USD即期{buy_or_sell}'].to_numpy()
return foreign_price
def render(self, mode='human'):
def _plot_position():
            # a trade occurred
if self._action != self._position.value:
                # currently not holding NTD (i.e. a foreign currency was bought)
if self._position != Positions.NTD:
                    # mark buys in red
buy_price_col = self.choice_price_col(self._position)
plt.scatter(self._current_tick, buy_price_col[self._current_tick], color='red')
                # previously not holding NTD (i.e. a foreign currency was sold)
if self._last_position != Positions.NTD:
                    # mark sells in green
sell_price_col = self.choice_price_col(self._last_position)
plt.scatter(self._current_tick, sell_price_col[self._current_tick], color='green')
start_time = time()
if self._first_rendering:
self._first_rendering = False
plt.cla()
plt.plot(self.prices['AUD即期買入'].to_numpy(), label="AUD")
plt.plot(self.prices['CAD即期買入'].to_numpy(), label="CAD")
plt.plot(self.prices['EUR即期買入'].to_numpy(), label="EUR")
plt.plot(self.prices['GBP即期買入'].to_numpy(), label="GBP")
plt.plot(self.prices['HKD即期買入'].to_numpy(), label="HKD")
plt.plot(self.prices['JPY即期買入'].to_numpy(), label="JPY")
plt.plot(self.prices['SGD即期買入'].to_numpy(), label="SGD")
plt.plot(self.prices['USD即期買入'].to_numpy(), label="USD")
# plt.yscale('log')
plt.legend(bbox_to_anchor=(1.0, 1.0))
            # mark the starting point in blue
plt.scatter(self._current_tick, self.prices['AUD即期買入'].to_numpy()[self._current_tick], color='blue')
plt.scatter(self._current_tick, self.prices['CAD即期買入'].to_numpy()[self._current_tick], color='blue')
plt.scatter(self._current_tick, self.prices['EUR即期買入'].to_numpy()[self._current_tick], color='blue')
plt.scatter(self._current_tick, self.prices['GBP即期買入'].to_numpy()[self._current_tick], color='blue')
plt.scatter(self._current_tick, self.prices['HKD即期買入'].to_numpy()[self._current_tick], color='blue')
plt.scatter(self._current_tick, self.prices['JPY即期買入'].to_numpy()[self._current_tick], color='blue')
plt.scatter(self._current_tick, self.prices['SGD即期買入'].to_numpy()[self._current_tick], color='blue')
plt.scatter(self._current_tick, self.prices['USD即期買入'].to_numpy()[self._current_tick], color='blue')
_plot_position()
plt.suptitle(
"Total Reward: %.6f" % self._total_reward + ' ~ ' +
"Total Profit: %.6f" % self._total_profit
)
end_time = time()
process_time = end_time - start_time
pause_time = (1 / self.metadata['render_fps']) - process_time
assert pause_time > 0., "High FPS! Try to reduce the 'render_fps' value."
plt.pause(pause_time)
def render_all(self, title=None):
plt.cla()
plt.plot(self.prices['AUD即期買入'].to_numpy(), label="AUD")
plt.plot(self.prices['CAD即期買入'].to_numpy(), label="CAD")
plt.plot(self.prices['EUR即期買入'].to_numpy(), label="EUR")
plt.plot(self.prices['GBP即期買入'].to_numpy(), label="GBP")
plt.plot(self.prices['HKD即期買入'].to_numpy(), label="HKD")
plt.plot(self.prices['JPY即期買入'].to_numpy(), label="JPY")
plt.plot(self.prices['SGD即期買入'].to_numpy(), label="SGD")
plt.plot(self.prices['USD即期買入'].to_numpy(), label="USD")
plt.legend(bbox_to_anchor=(1.0, 1.0))
last_positions = Positions.NTD
for i, position in enumerate(self._position_history):
if position != None:
                # a trade occurred
if position != last_positions:
                    # currently not holding NTD (i.e. a foreign currency was bought)
if position != Positions.NTD:
price_col = self.choice_price_col(position)
plt.scatter(i, price_col[i], color='red')
                    # previously not holding NTD (i.e. a foreign currency was sold)
if last_positions != Positions.NTD:
price_col = self.choice_price_col(last_positions)
plt.scatter(i, price_col[i], color='green')
last_positions = self._position_history[i]
if title:
plt.title(title)
plt.suptitle(
"Total Reward: %.6f" % self._total_reward + ' ~ ' +
"Total Profit: %.6f" % self._total_profit
)
def close(self):
plt.close()
def save_rendering(self, filepath):
plt.savefig(filepath)
def pause_rendering(self):
plt.show()
def _process_data(self):
raise NotImplementedError
def _calculate_reward(self, action):
raise NotImplementedError
def _update_profit(self, action):
raise NotImplementedError
class ForexEnv(TradingEnv):
def __init__(self, cfg):
self.config = cfg
self.cfg = cfg = cfg['task']['env']
self.train_df = pd.read_csv(cfg['train_data'])
self.train_df.replace("-", 0, inplace=True)
self.test_df = pd.read_csv(cfg['test_data'])
self.test_df.replace("-", 0, inplace=True)
self.frame_bound = cfg['frame_bound']
self.num_envs = cfg['num_envs']
self.window_size = cfg['window_size']
super().__init__(self.train_df, self.window_size, None)
self.num_obs = int(np.prod(self.observation_space.shape)) + 9
self.num_actions = int(np.prod(self.action_space.shape))
self.num_values = 1
self.obs = torch.zeros([self.num_envs, self.num_obs], dtype=torch.float)
self.reset()
def reset_done(self):
if self._truncated:
Runner.logger.log({'total profit': self._total_profit})
self.obs, _ = self.reset()
self.compute_obs()
return self.obs
def compute_obs(self):
ct_obs = [0] * 9
ct_obs[self._position.value] = 1
self.obs = torch.tensor(self.obs)
obs = list(self.obs.flatten()) + ct_obs
self.obs = torch.tensor(obs, dtype=torch.float).reshape(1, self.num_obs)
def step(self, action):
self.obs, rew, reset, _ = super().step(action[0].item())
Runner.logger.log({'reward': rew})
self.compute_obs()
rew = torch.tensor(rew, dtype=torch.float).reshape(1, 1)
reset = torch.tensor(reset, dtype=torch.long).reshape(1, 1)
return self.obs, rew, reset, {}
def _update_profit(self, action):
        # a trade occurred
if action != self._position.value:
            # previously held currency was not NTD
if self._position != Positions.NTD:
                # "sell" here is from the bank's side, i.e. the investor's buy
buy_price_col = self.choice_price_col(self._position, "賣出")
buy_price = buy_price_col[self._last_trade_tick]
                # "buy" here is from the bank's side, i.e. the investor's sell
sell_price_col = self.choice_price_col(self._position, "買入")
sell_price = sell_price_col[self._current_tick]
self._total_profit = (self._total_profit / buy_price) * sell_price
            # end of episode
if self._truncated:
if action != Actions.Buy_NTD.value:
buy_price_col = self.choice_price_col(Positions(action), "賣出")
buy_price = buy_price_col[self._last_trade_tick]
sell_price_col = self.choice_price_col(Positions(action), "買入")
sell_price = sell_price_col[self._current_tick]
self._total_profit = (self._total_profit / buy_price) * sell_price
def get_total_profit(self):
return self._total_profit
def _calculate_reward(self, action):
reward = 0
if self._position == Positions.NTD:
reward = 0
else:
price_col = self.choice_price_col(self._position)
current_price = price_col[self._current_tick]
last_day_price = price_col[self._current_tick-1]
reward = (current_price - last_day_price) / last_day_price
return reward * 100
        # reward = 0
        #
        # if action != self._position.value:
        #     # previously held currency was not NTD
        #     if self._position != Positions.NTD:
        #         # "sell" here is from the bank's side, i.e. the investor's buy
        #         buy_price_col = self.choice_price_col(self._position, "賣出")
        #         buy_price = buy_price_col[self._last_trade_tick]
        #
        #         # "buy" here is from the bank's side, i.e. the investor's sell
        #         sell_price_col = self.choice_price_col(self._position, "買入")
        #         sell_price = sell_price_col[self._current_tick]
        #         reward = (self._total_profit / buy_price) * sell_price - self._total_profit
        #
        #     # end of episode
        #     elif self._truncated:
        #         if action != Actions.Buy_NTD.value:
        #             buy_price_col = self.choice_price_col(Positions(action), "賣出")
        #             buy_price = buy_price_col[self._last_trade_tick]
        #
        #             sell_price_col = self.choice_price_col(Positions(action), "買入")
        #             sell_price = sell_price_col[self._current_tick]
        #
        #             reward = (self._total_profit / buy_price) * sell_price - self._total_profit
        #
        # return reward * 1000
def _process_data(self):
start = self.frame_bound[0] - self.window_size
end = self.frame_bound[1]
prices = self.df.iloc[start:end, :].filter(like='即期')
        # the set of features used can be changed here
signal_features = self.df.iloc[:, 1:].to_numpy()[start:end]
return prices, signal_features
def test(self):
frame_bounds = [(10, 100), (10, 300), (10, 800)]
mean_profit = 0
for frame_bound in frame_bounds:
cfg = deepcopy(self.config)
cfg['task']['env']['train_data'] = self.cfg['test_data']
cfg['task']['env']['frame_bound'] = frame_bound
env = ForexEnv(cfg)
env.obs, _ = env.reset()
env.compute_obs()
while True:
action = self.agent.model.get_action(env.obs, test=True)
obs, reward, done, info = env.step(action)
if done:
mean_profit += env.get_total_profit()
break
mean_profit /= len(frame_bounds)
Runner.logger.log({'test profit': mean_profit})
return mean_profit
def save(self):
return None
def load(self, x):
pass
| 15,508 | Python | 33.851685 | 112 | 0.559195 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/run.py | import cProfile
from utils.hydra_cfg.hydra_utils import *
from utils.runner import Runner
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
def run(cfg):
Runner.init(cfg)
if cfg.profile:
cProfile.runctx("Runner.run()", globals(), locals(), "profile.pstat")
else:
Runner.run()
def runs(cfg):
#
# # policy gradient
# cfg.train.name = 'PGAgent'
#
# # reward
# cfg.train.params.config.tau = 0
# cfg.train.params.config.gamma = 0
# run(cfg)
#
# cfg.train.params.config.tau = 0.75
# cfg.train.params.config.gamma = 0.75
# run(cfg)
#
# # mlp size
# cfg.train.params.model.actor_mlp = [32, 32]
# cfg.train.params.model.critic_mlp = [32, 32]
# cfg.train.params.config.learning_rate = 1e-3
# cfg.train.params.config.minibatch_size = 512
# run(cfg)
#
# # batch size
# cfg.train.params.model.actor_mlp = [256, 256]
# cfg.train.params.model.critic_mlp = [256, 256]
# cfg.train.params.config.learning_rate = 1e-3
# cfg.train.params.config.minibatch_size = 64
# run(cfg)
#
# # lr
# cfg.train.params.model.actor_mlp = [256, 256]
# cfg.train.params.model.critic_mlp = [256, 256]
# cfg.train.params.config.learning_rate = 1e-2
# cfg.train.params.config.minibatch_size = 512
# run(cfg)
# ppo
cfg.train.name = 'PPOAgent'
cfg.train.params.model.actor_mlp = [256, 256]
cfg.train.params.model.critic_mlp = [256, 256]
cfg.train.params.config.learning_rate = 1e-3
cfg.train.params.config.minibatch_size = 512
run(cfg)
@hydra.main(config_name="config", config_path="./cfg")
def parse_hydra_configs(cfg: DictConfig):
if cfg.debug:
cfg.wandb = cfg.debug == "wandb"
cfg.save = cfg.debug == "save"
cfg.task.env.num_envs = 1
runs(cfg)
elif cfg.test:
cfg.wandb = False
cfg.save = False
cfg.task.env.num_envs = 1
cfg.train.params.config.minibatch_size = 1
runs(cfg)
else:
runs(cfg)
Runner.close()
if __name__ == "__main__":
parse_hydra_configs()
| 2,138 | Python | 23.872093 | 77 | 0.606642 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/learning/actor_critic_model.py | from copy import deepcopy
import torch
from torch import nn
from torch.distributions import Categorical
from .utils import neg_log_p, eval_no_grad, Identity, RunningMeanStd
class Mlp(nn.Module):
def __init__(
self,
in_size, hidden_size, out_size=None,
activation: nn.Module = nn.ReLU(),
output_activation: nn.Module = nn.Identity()
):
super().__init__()
model = []
self.sizes = sizes = [in_size] + hidden_size
for x, y in zip(sizes[:-1], sizes[1:]):
model.append(nn.Linear(x, y))
model.append(deepcopy(activation))
if out_size is not None:
model.append(nn.Linear(sizes[-1], out_size))
self.model = nn.Sequential(*model)
self.out_act = output_activation
def forward(self, x):
return self.out_act(self.model(x))
def set_spectral_norm(self):
for i, layer in enumerate(self.model):
if isinstance(layer, nn.Linear):
self.model[i] = nn.utils.spectral_norm(layer)
class ActorCriticModel(nn.Module):
def __init__(self, config):
super().__init__()
self.obs_size = config['num_obs']
self.action_size = config['num_actions']
self.value_size = config['num_values']
self.actor = self.Actor(self.obs_size, config['actor_mlp'], self.action_size)
self.critic = self.Critic(self.obs_size, config['critic_mlp'], self.value_size)
normalize = lambda x: (x - x.mean()) / (x.std() + 1e-8)
self.normalize_obs = RunningMeanStd(self.obs_size) if config['normalize_obs'] else Identity()
self.normalize_value = RunningMeanStd(self.value_size) if config['normalize_value'] else Identity()
self.normalize_advantage = normalize if config['normalize_advantage'] else Identity()
self.preproc_advantage = lambda x: self.normalize_advantage(x.mean(dim=-1))
class Actor(nn.Module):
def __init__(self, obs_size, mlp_size, action_size):
super().__init__()
            # 9 = number of discrete actions the policy chooses among; `action_size` is the
            # width of the stored action tensor, not the size of the policy head.
            self.mu = Mlp(obs_size, mlp_size, 9, output_activation=nn.Softmax(dim=-1))
def forward(self, x):
return self.mu(x)
class Critic(nn.Module):
def __init__(self, obs_size, mlp_size, value_size):
super().__init__()
self.value = Mlp(obs_size, mlp_size, value_size)
def forward(self, x):
return self.value(x)
@eval_no_grad
def get_action(self, obs, train=False, test=False):
obs = self.normalize_obs(obs)
mu = self.actor(obs)
if train:
return mu
elif test:
return torch.argmax(mu, dim=-1)
else:
action_dist = Categorical(mu)
action = action_dist.sample()
return action, -action_dist.log_prob(action)
@eval_no_grad
def get_value(self, obs, train=False):
obs = self.normalize_obs(obs)
value = self.critic(obs)
if train:
return value
else:
return self.normalize_value(value, unnorm=True)
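# Illustrative usage sketch (added; not part of the original module). The config keys
# mirror those read in ActorCriticModel.__init__ above; the concrete sizes are made up.
if __name__ == '__main__':
    cfg = dict(num_obs=10, num_actions=1, num_values=1,
               actor_mlp=[64, 64], critic_mlp=[64, 64],
               normalize_obs=True, normalize_value=False, normalize_advantage=True)
    model = ActorCriticModel(cfg)
    model.eval()
    obs = torch.rand(4, cfg['num_obs'])
    action, neglogp = model.get_action(obs)   # sampled discrete action and its -log prob
    value = model.get_value(obs)              # critic estimate, shape (4, num_values)
    print(action.shape, neglogp.shape, value.shape)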
| 3,072 | Python | 33.144444 | 107 | 0.58724 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/learning/replay_buffer.py | import torch
class ReplayBuffer():
def __init__(self, buffer_size, device):
self._head = 0
self._total_count = 0
self._buffer_size = buffer_size
self._device = device
self._data_buf = None
self._sample_idx = torch.randperm(buffer_size)
self._sample_head = 0
return
def reset(self):
self._head = 0
self._total_count = 0
self._reset_sample_idx()
return
def get_buffer_size(self):
return self._buffer_size
def get_total_count(self):
return self._total_count
def store(self, data_dict):
if (self._data_buf is None):
self._init_data_buf(data_dict)
n = next(iter(data_dict.values())).shape[0]
buffer_size = self.get_buffer_size()
assert(n < buffer_size)
for key, curr_buf in self._data_buf.items():
curr_n = data_dict[key].shape[0]
assert(n == curr_n)
store_n = min(curr_n, buffer_size - self._head)
curr_buf[self._head:(self._head + store_n)] = data_dict[key][:store_n]
remainder = n - store_n
if (remainder > 0):
curr_buf[0:remainder] = data_dict[key][store_n:]
self._head = (self._head + n) % buffer_size
self._total_count += n
return
def sample(self, n):
total_count = self.get_total_count()
buffer_size = self.get_buffer_size()
if self.is_empty():
return None
idx = torch.arange(self._sample_head, self._sample_head + n)
idx = idx % buffer_size
rand_idx = self._sample_idx[idx]
if (total_count < buffer_size):
rand_idx = rand_idx % self._head
samples = dict()
for k, v in self._data_buf.items():
samples[k] = v[rand_idx]
self._sample_head += n
if (self._sample_head >= buffer_size):
self._reset_sample_idx()
return samples
def _reset_sample_idx(self):
buffer_size = self.get_buffer_size()
self._sample_idx[:] = torch.randperm(buffer_size)
self._sample_head = 0
return
def _init_data_buf(self, data_dict):
buffer_size = self.get_buffer_size()
self._data_buf = dict()
for k, v in data_dict.items():
v_shape = v.shape[1:]
self._data_buf[k] = torch.zeros((buffer_size,) + v_shape, device=self._device)
return
def is_empty(self):
return self._total_count == 0
class ReplayBufferCPU(ReplayBuffer):
def __init__(self, buffer_size, device):
self.sample_device = device
super().__init__(buffer_size, device='cpu')
def sample(self, n):
x = super().sample(n)
if x is not None:
for k in x.keys():
x[k] = x[k].to(self.sample_device)
return x
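if __name__ == '__main__':
    # Minimal usage sketch (added; not part of the original module): store one batch of
    # transitions and draw a random sample. Keys and sizes here are made-up examples.
    buf = ReplayBuffer(buffer_size=8, device='cpu')
    buf.store({'obs': torch.rand(4, 3), 'action': torch.rand(4, 2)})
    print(buf.sample(2))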
| 2,897 | Python | 26.339622 | 90 | 0.534001 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/learning/pg_agent.py | import torch
from .ppo_agent import PPOAgent
torch.autograd.set_detect_anomaly(True)
class PGAgent(PPOAgent):
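    # (Added note) PGAgent reuses the PPO training loop but swaps the clipped surrogate
    # for a plain policy-gradient objective, sum_t neglogp_t * A_t, where the third
    # argument receives the advantage computed in PPOAgent.update, and it zeroes out
    # the critic loss so only the actor is optimized.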
def _actor_loss(self, _, neglogp, reward):
return (neglogp * reward).sum()
def _critic_loss(self, old_value, value, return_batch):
return 0
| 278 | Python | 20.461537 | 59 | 0.679856 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/learning/experience.py | import gym
import torch
import numpy as np
class ExperienceBuffer:
def __init__(self, shape, env_info, device):
self.shape = tuple(shape)
self.num_obs = env_info['num_obs']
self.num_actions = env_info['num_actions']
self.num_values = env_info['num_values']
self.device = device
self.datas = {}
self.create_buffer()
def create_buffer(self):
self.add_buffer('obs', self.num_obs)
self.add_buffer('reward', self.num_values)
self.add_buffer('return', self.num_values)
self.add_buffer('value', self.num_values)
self.add_buffer('action', self.num_actions)
self.add_buffer('neglogp')
self.add_buffer('done', dtype=torch.long)
self.add_buffer('next_obs', self.num_obs)
self.add_buffer('next_value', self.num_values)
# def create_buffer(self):
# self.datas['obs'] = torch.zeros([*self.shape, self.num_obs], device=self.device)
# self.datas['reward'] = torch.zeros([*self.shape, self.num_values], device=self.device)
# self.datas['return'] = torch.zeros([*self.shape, self.num_values], device=self.device)
# self.datas['value'] = torch.zeros([*self.shape, self.num_values], device=self.device)
# self.datas['action'] = torch.zeros([*self.shape, self.num_actions], device=self.device)
# self.datas['neglogp'] = torch.zeros([*self.shape], device=self.device)
# self.datas['done'] = torch.zeros([*self.shape], dtype=torch.long, device=self.device)
# self.datas['next_obs'] = torch.zeros([*self.shape, self.num_obs], device=self.device)
# self.datas['next_value'] = torch.zeros([*self.shape, self.num_values], device=self.device)
def add_buffer(self, name, shape=(), dtype=torch.float):
shape = (shape,) if isinstance(shape, int) else tuple(shape)
self.datas[name] = torch.zeros(self.shape + shape, dtype=dtype, device=self.device)
def update_data(self, *args, **kwargs):
raise NotImplementedError
def get_data(self, *args, **kwargs):
raise NotImplementedError
class VecEnvExperienceBuffer(ExperienceBuffer):
def update_data(self, key, idx, value):
self.datas[key][idx] = value
def get_data(self):
batch_dict = {}
for k, v in self.datas.items():
s = v.shape
batch_dict[k] = v.transpose(0, 1).reshape(s[0] * s[1], *s[2:])
return batch_dict
class AsyncExperienceBuffer(ExperienceBuffer):
def __init__(self, num_actors, env_info, max_size, device):
super().__init__([max_size * 2], env_info, device)
self.size = max_size
self.run_idx = torch.zeros([num_actors], dtype=torch.long, device=self.device)
def create_buffer(self):
super().create_buffer()
self.status = torch.zeros(self.shape, dtype=torch.long, device=self.device)
self.datas['steps'] = torch.zeros([*self.shape], dtype=torch.long, device=self.device)
def update_data(self, **kwargs):
raise NotImplementedError
def pre_update_data(self, env_ids, datas: dict):
idx = (self.status == 0).nonzero().squeeze(-1)[:len(env_ids)]
self.run_idx[env_ids] = idx
for k, v in datas.items():
self.datas[k][idx] = v
self.status[idx] = -1
def post_update_data(self, env_ids, datas: dict):
idx = self.run_idx[env_ids]
for k, v in datas.items():
self.datas[k][idx] = v
self.status[self.status > 0] += 1
self.status[idx] = 1
        # TODO: check whether this reset to 0 is needed
self.status[idx[datas['steps'] <= 0]] = 0
def full(self):
return torch.sum(self.status > 0) >= self.size
def get_data(self):
if not self.full():
raise
idx = self.status.topk(self.size, sorted=False)[1]
data = {k: v[idx] for k, v in self.datas.items()}
self.status[idx] = 0
return data
if __name__ == '__main__':
T = torch.Tensor
TL = lambda x: T(x).to(dtype=torch.long)
Z = torch.zeros
R = torch.rand
env_info = {'action_space': Z(2), 'observation_space': Z(3), 'value_size': 1}
buf = AsyncExperienceBuffer(5, env_info, 5, 'cpu')
buf.pre_update_data(TL([1, 3]), {'obs': T([[1, 1, 1], [2, 2, 2]])})
buf.pre_update_data(TL([2, 0]), {'obs': T([[3, 3, 3], [4, 4, 4]])})
buf.post_update_data(TL([2, 0]), {'action': T([[3, 3], [4, 4]])})
buf.post_update_data(TL([1, 3]), {'action': T([[1, 1], [2, 2]])})
buf.pre_update_data(TL([2, 0]), {'obs': T([[3, 3, 3], [4, 4, 4]])})
buf.post_update_data(TL([2, 0]), {'action': T([[3, 3], [4, 4]])})
print(buf.run_idx)
print(buf.datas['obs'], buf.datas['action'])
print(buf.status)
print(buf.get_data())
print(buf.status)
| 4,782 | Python | 38.204918 | 100 | 0.587411 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/learning/utils.py | import numpy as np
import torch
from torch import nn
from utils.torch_utils import to_torch_size
def eval_no_grad(func):
def _eval_no_grad(self, *args, **kwargs):
if not self.training:
with torch.no_grad():
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return _eval_no_grad
class Identity(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, x, **kwargs):
return x
def neg_log_p(x, mean, log_std):
return 0.5 * (((x - mean) / torch.exp(log_std)) ** 2).sum(dim=-1) \
+ 0.5 * np.log(2.0 * np.pi) * x.size()[-1] \
+ log_std.sum(dim=-1)
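# (Added note) neg_log_p above is the negative log-density of a diagonal Gaussian:
# -log N(x; mu, diag(exp(log_std))^2) = 0.5 * sum_i ((x_i - mu_i) / sigma_i)^2
#                                       + 0.5 * d * log(2 * pi) + sum_i log(sigma_i)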
class RunningMeanStd(nn.Module):
def __init__(self, in_size, eps=1e-05):
super().__init__()
self.in_size = to_torch_size(in_size)
self.eps = eps
self.register_buffer("mean", torch.zeros(in_size, dtype=torch.float64))
self.register_buffer("var", torch.ones(in_size, dtype=torch.float64))
self.register_buffer("count", torch.ones((), dtype=torch.float64))
def _update(self, batch_mean, batch_var, batch_count):
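        # (Added comment) Parallel-statistics merge (Chan et al.): combine the running
        # (mean, var, count) with the batch statistics so the result equals the mean and
        # variance of the pooled data.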
delta = batch_mean - self.mean
m_a = self.var * self.count
m_b = batch_var * batch_count
m2 = m_a + m_b + delta**2 * self.count * batch_count / (self.count + batch_count)
self.count += batch_count
self.mean[:] = self.mean + delta * batch_count / self.count
self.var[:] = m2 / self.count
def forward(self, x, unnorm=False):
if x.nelement() == 0:
return x
if self.training and not unnorm:
axis = list(range(x.ndim - len(self.in_size)))
mean = x.mean(axis)
var = x.var(axis, correction=0)
count = x.shape[:-1].numel()
self._update(mean, var, count)
if unnorm:
y = torch.clamp(x, min=-5.0, max=5.0)
y = torch.sqrt(self.var.float() + self.eps) * y + self.mean.float()
else:
y = (x - self.mean.float()) / torch.sqrt(self.var.float() + self.eps)
y = torch.clamp(y, min=-5.0, max=5.0)
return y
| 2,193 | Python | 29.472222 | 89 | 0.545372 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/learning/dataset.py | import torch
class Dataset:
def __init__(self, batch_size, minibatch_size, device):
self.batch_size = batch_size
self.minibatch_size = minibatch_size
self.device = device
# self.size = self.batch_size // self.minibatch_size
self._idx_buf = torch.randperm(batch_size)
def update(self, datas):
self.datas = datas
def __len__(self):
return self.batch_size // self.minibatch_size
def __getitem__(self, idx):
start = idx * self.minibatch_size
end = (idx + 1) * self.minibatch_size
sample_idx = self._idx_buf[start:end]
data_dict = {}
for k, v in self.datas.items():
if v is not None:
data_dict[k] = v[sample_idx].detach()
if end >= self.batch_size:
self._shuffle_idx_buf()
return data_dict
def _shuffle_idx_buf(self):
self._idx_buf[:] = torch.randperm(self.batch_size)
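if __name__ == '__main__':
    # Minimal usage sketch (added; not part of the original module): wrap one flat batch
    # and iterate over minibatches the way PPOAgent does. Keys and sizes are made up.
    ds = Dataset(batch_size=8, minibatch_size=4, device='cpu')
    ds.update({'obs': torch.rand(8, 3), 'advantage': torch.rand(8)})
    for i in range(len(ds)):
        print({k: v.shape for k, v in ds[i].items()})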
| 969 | Python | 26.714285 | 60 | 0.55934 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/learning/ppo_agent.py | import os
import shutil
import time
import torch
from torch import optim
from torch.distributions import Categorical
from .utils import neg_log_p
from .dataset import Dataset
from .experience import VecEnvExperienceBuffer
from .actor_critic_model import ActorCriticModel
from utils.runner import Runner
torch.autograd.set_detect_anomaly(True)
class PPOAgent:
def __init__(self, params, env):
print(f'\n------------------------------------ {self.__class__.__name__} ------------------------------------')
self.config = config = params['config']
self.device = config.get('device', 'cuda:0')
# save
self.save_freq = config.get('save_frequency', 0)
# normalize
self.normalize_obs = self.config['normalize_obs']
self.normalize_value = self.config.get('normalize_value', False)
self.normalize_advantage = config['normalize_advantage']
# learning
self.lr = config['learning_rate']
self.num_actors = env.num_envs
self.horizon_length = config['horizon_length']
self.seq_len = self.config.get('seq_length', 4)
self.max_epochs = self.config.get('max_epochs', -1)
self.mini_epochs_num = self.config['mini_epochs']
self.minibatch_size = self.config.get('minibatch_size')
self.batch_size = self.horizon_length * self.num_actors
assert (self.batch_size % self.minibatch_size == 0)
self.e_clip = config['e_clip']
self.clip_action = self.config.get('clip_actions', True)
self.clip_value = config['clip_value']
self.tau = self.config['tau']
self.gamma = self.config['gamma']
self.critic_loss_coef = config['critic_loss_coef']
self.bounds_loss_coef = self.config.get('bounds_loss_coef', None)
# env
self.env = env
self.build_env_info()
# model
self.build_model(params['model'])
self.optimizer = optim.AdamW(self.model.parameters(), self.lr, eps=1e-08, weight_decay=0)
# buffers
self.dataset = Dataset(self.batch_size, self.minibatch_size, self.device)
self.experience_buffer = VecEnvExperienceBuffer([self.horizon_length, self.num_actors], self.env_info, self.device)
# counter
self.epoch_num = 0
self.env.agent = self
def build_env_info(self):
self.env_info = dict(
num_obs=self.env.num_obs,
num_actions=self.env.num_actions,
num_values=self.env.num_values,
)
def build_model(self, config):
model = config.get('model', ActorCriticModel)
config['normalize_obs'] = self.normalize_obs
config['normalize_value'] = self.normalize_value
config['normalize_advantage'] = self.normalize_advantage
config.update(self.env_info)
self.model = model(config).to(self.device)
print(self.model)
def set_eval(self):
self.model.eval()
def set_train(self):
self.model.train()
def preproc_action(self, action):
return action.clone()
def env_step(self, action):
_action = self.preproc_action(action)
obs, reward, done, infos = self.env.step(_action)
obs = obs.to(self.device)
reward = reward.to(self.device)
done = done.to(self.device)
for k in infos.keys():
if isinstance(infos[k], torch.Tensor):
infos[k] = infos[k].to(self.device)
return obs, reward, done, infos
def env_reset_done(self):
obs = self.env.reset_done()
return obs.to(self.device)
def play_steps(self):
for n in range(self.horizon_length):
obs = self.env_reset_done()
self.experience_buffer.update_data('obs', n, obs)
value = self.model.get_value(obs)
action, neglogp = self.model.get_action(obs)
obs, reward, done, infos = self.env_step(action)
next_value = self.model.get_value(obs)
self.experience_buffer.update_data('value', n, value)
self.experience_buffer.update_data('action', n, action)
self.experience_buffer.update_data('neglogp', n, neglogp)
self.experience_buffer.update_data('reward', n, reward)
self.experience_buffer.update_data('next_obs', n, obs)
self.experience_buffer.update_data('done', n, done)
self.experience_buffer.update_data('next_value', n, next_value)
self.post_step(n, infos)
mb_done = self.experience_buffer.datas['done']
mb_value = self.experience_buffer.datas['value']
mb_next_value = self.experience_buffer.datas['next_value']
mb_reward = self.experience_buffer.datas['reward']
mb_value, mb_return, mb_adv = self.compute_return(mb_done, mb_value, mb_reward, mb_next_value)
self.experience_buffer.datas['value'] = mb_value
self.experience_buffer.datas['return'] = mb_return
self.experience_buffer.datas['advantage'] = mb_adv
batch_dict = self.experience_buffer.get_data()
return batch_dict
def train_epoch(self):
self.set_eval()
play_time_start = time.time()
batch_dict = self.play_steps()
play_time_end = time.time()
update_time_start = time.time()
self.set_train()
self.curr_frames = self.batch_size
self.dataset.update(batch_dict)
for mini_ep in range(0, self.mini_epochs_num):
for i in range(len(self.dataset)):
self.update(self.dataset[i])
self.post_epoch()
update_time_end = time.time()
play_time = play_time_end - play_time_start
update_time = update_time_end - update_time_start
total_time = update_time_end - play_time_start
return play_time, update_time, total_time
def train(self):
self.last_mean_rewards = -100500
total_time = 0
self.frame = 0
while True:
self.epoch_num += 1
play_time, update_time, epoch_time = self.train_epoch()
total_time += epoch_time
scaled_time = epoch_time
scaled_play_time = play_time
curr_frames = self.curr_frames
self.frame += curr_frames
fps_step = curr_frames / scaled_play_time
fps_total = curr_frames / scaled_time
print(f'fps step: {fps_step:.1f} fps total: {fps_total:.1f}')
if self.save_freq > 0:
if self.epoch_num % self.save_freq == 0:
Runner.save_model('Epoch' + str(self.epoch_num))
if self.epoch_num > self.max_epochs:
print('MAX EPOCHS NUM!')
return
def test(self):
self.set_eval()
score = self.env.test()
print('total profit:', score)
def post_step(self, n, infos):
pass
def post_epoch(self):
Runner.logger.upload()
if self.epoch_num % 10 == 0:
self.env.test()
def compute_return(self, done, value, reward, next_value):
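        # (Added comment) Generalized Advantage Estimation:
        #   delta_t = r_t + gamma * V(s_{t+1}) - V(s_t)
        #   A_t     = delta_t + gamma * tau * (1 - done_t) * A_{t+1}
        # Returns are rebuilt as V + A, and both pass through value normalization below.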
last_gae_lam = 0
adv = torch.zeros_like(reward)
done = done.float()
for t in reversed(range(self.horizon_length)):
not_done = 1.0 - done[t]
not_done = not_done.unsqueeze(1)
delta = reward[t] + self.gamma * next_value[t] - value[t]
last_gae_lam = delta + self.gamma * self.tau * not_done * last_gae_lam
adv[t] = last_gae_lam
returns = self.model.normalize_value(value + adv)
value = self.model.normalize_value(value)
adv = self.model.preproc_advantage(adv)
return value, returns, adv
def update(self, input_dict):
obs = input_dict['obs']
action = input_dict['action']
old_value = input_dict['value']
old_neglogp = input_dict['neglogp']
advantage = input_dict['advantage']
returns = input_dict['return']
mu = self.model.get_action(obs, train=True)
neglogp = -Categorical(mu).log_prob(action.squeeze(-1))
value = self.model.get_value(obs, train=True)
# print(mu.shape, action.shape)
# print(neglogp.shape)
# print(torch.exp(old_neglogp[0] - neglogp[0]))
a_loss = self._actor_loss(old_neglogp, neglogp, advantage)
c_loss = self._critic_loss(old_value, value, returns)
b_loss = self._bound_loss(mu)
loss = a_loss + self.critic_loss_coef * c_loss + self.bounds_loss_coef * b_loss
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
Runner.logger.log({
'loss/total': loss,
'loss/actor': a_loss,
'loss/critic': c_loss,
'value/': value,
})
def log_results(self, **kwargs):
pass
def _actor_loss(self, old_neglogp, neglogp, advantage):
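        # (Added comment) PPO clipped surrogate: ratio = exp(old_neglogp - neglogp) is
        # pi_new / pi_old (capped at 2 to avoid exploding updates); taking
        # max(-surr1, -surr2) is the negated pessimistic min of the two objectives.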
ratio = torch.exp(old_neglogp - neglogp).clamp_max(2) # prevent too large loss
surr1 = advantage * ratio
surr2 = advantage * torch.clamp(ratio, 1.0 - self.e_clip, 1.0 + self.e_clip)
a_loss = torch.max(-surr1, -surr2)
return a_loss.mean()
def _critic_loss(self, old_value, value, return_batch):
if self.clip_value:
value_pred_clipped = old_value + (value - old_value).clamp(-self.e_clip, self.e_clip)
value_losses = (value - return_batch) ** 2
value_losses_clipped = (value_pred_clipped - return_batch)**2
c_loss = torch.max(value_losses, value_losses_clipped)
else:
c_loss = (return_batch - value) ** 2
return c_loss.mean()
def _bound_loss(self, mu):
if self.bounds_loss_coef is not None:
soft_bound = 1.0
mu_loss_high = torch.maximum(mu - soft_bound, torch.tensor(0, device=self.device)) ** 2
mu_loss_low = torch.minimum(mu + soft_bound, torch.tensor(0, device=self.device)) ** 2
b_loss = (mu_loss_low + mu_loss_high).sum(axis=-1)
        else:
            # Use a zero tensor so the .mean() below still works when the bound loss is disabled.
            b_loss = torch.zeros(1, device=self.device)
        return b_loss.mean()
def save(self):
return self.model.state_dict()
def load(self, datas):
self.model.load_state_dict(datas)
| 10,238 | Python | 33.708474 | 123 | 0.582926 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/cfg/config.yaml | experiment: ''
num_envs: ''
seed: 42
torch_deterministic: False
rl_device: 'cpu'
# set the maximum number of learning iterations to train for. overrides default per-environment setting
max_iterations: ''
# RLGames Arguments
# test - if set, run policy in inference mode (requires setting checkpoint to load)
test: False
# used to set checkpoint path
checkpoint: ''
# disables rendering
headless: False
# enables native livestream
enable_livestream: False
# timeout for MT script
mt_timeout: 30
# set default task and default training config based on task
defaults:
- task: Noob
- train: ${task}PPO
- hydra/job_logging: disabled
# set the directory where the output files get saved
hydra:
output_subdir: null
run:
dir: .
render: False
debug: False
wandb: True
save: True
profile: False
test_data: ''
| 821 | YAML | 18.116279 | 103 | 0.74056 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/cfg/default_config.yaml |
# Task name - used to pick the class to load
task_name: HumanoidMGC
# experiment name. defaults to name of training config
experiment: ''
# if set to positive integer, overrides the default number of environments
num_envs:
# seed - set to -1 to choose random seed
seed: 42
# set to True for deterministic performance
torch_deterministic: False
# set the maximum number of learning iterations to train for. overrides default per-environment setting
max_iterations: ''
## Device config
physics_engine: 'physx'
# whether to use cpu or gpu pipeline
pipeline: 'gpu'
# whether to use cpu or gpu physx
sim_device: 'gpu'
# used for gpu simulation only - device id for running sim and task if pipeline=gpu
device_id: 0
# device to run RL
rl_device: 'cuda:0'
# multi-GPU training
multi_gpu: False
## PhysX arguments
num_threads: 4 # Number of worker threads per scene used by PhysX - for CPU PhysX only.
solver_type: 0 # 0: pgs, 1: tgs
# RLGames Arguments
# test - if set, run policy in inference mode (requires setting checkpoint to load)
test: False
# used to set checkpoint path
checkpoint: ''
# disables rendering
headless: False
# enables native livestream
enable_livestream: False
# timeout for MT script
mt_timeout: 30
# set default task and default training config based on task
defaults:
- task: HumanoidMGC
- train: ${task}PPO
- hydra/job_logging: disabled
# set the directory where the output files get saved
hydra:
output_subdir: null
run:
dir: .
wandb: False
save: False | 1,498 | YAML | 23.57377 | 103 | 0.746996 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/cfg/task/Noob.yaml | # used to create the object
name: Noob
# if given, will override the device setting in gym.
env:
num_envs: ${resolve_default:1,${...num_envs}}
train_data: './train.csv'
test_data: ${resolve_default:'./test.csv',${...test_data}}
window_size: 10
# frame_bound: [100, 1000]
frame_bound: [1850, 2850]
# frame_bound: [10, 800]
#
| 337 | YAML | 23.142855 | 60 | 0.643917 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/cfg/train/NoobPPO.yaml | name: PPOAgent
params:
seed: ${...seed}
model:
actor_mlp: [256, 256]
critic_mlp: [256, 256]
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
device: ${....rl_device}
save_frequency: 10
normalize_obs: True
normalize_value: False
normalize_advantage: True
horizon_length: 2048
max_epochs: ${resolve_default:200,${....max_iterations}}
mini_epochs: 6
minibatch_size: 512
tau: 0.9
gamma: 0.9
e_clip: 0.2
clip_value: False
learning_rate: 1e-3
critic_loss_coef: 1
bounds_loss_coef: 10
grad_penalty_coef: 0
| 712 | YAML | 19.970588 | 101 | 0.627809 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/utils/torch_utils.py | from typing import Optional, Sequence
import torch
def to_torch_size(*size) -> torch.Size:
if len(size) == 1 and isinstance(size[0], Sequence):
torch_size = size[0]
else:
torch_size = list(size)
return torch.Size(torch_size)
| 255 | Python | 22.272725 | 56 | 0.647059 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/utils/wandb_logger.py | import torch
import wandb
# class WandbLogger:
# def __init__(self, project, run_name, log=True):
# self.data = {}
# self.data_cnt = {}
# self.is_log = log
# if log:
# wandb.init(project=project)
# wandb.run.name = run_name
# wandb.run.save()
# def stop(self):
# wandb.finish()
# def log(self, datas: dict):
# if self.is_log:
# for k, v in datas.items():
# if isinstance(v, torch.Tensor):
# if v.nelement == 0:
# v = torch.nan
# v = v.mean().item()
# n = self.data_cnt.get(k, 0)
# x = self.data.get(k, 0)
# self.data_cnt[k] = n + 1
# self.data[k] = x * n / (n+1) + v / (n+1)
# def upload(self):
# if self.is_log:
# wandb.log(self.data)
# self.data.clear()
# self.data_cnt.clear()
class WandbLogger:
def __init__(self, project, run_name, log=True):
pass
def log(self, datas: dict):
pass
def upload(self):
pass
def stop(self):
pass | 1,176 | Python | 25.155555 | 58 | 0.443027 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/utils/materials.py | import numpy as np
from pxr import Sdf
from omni.isaac.core.materials import omni_pbr
class OmniPBR(omni_pbr.OmniPBR):
def __init__(self, name, prim_path=None, color: list = None, opacity=None, reflection=None):
if prim_path is None:
prim_path = '/World/Looks/' + name
super().__init__(prim_path, name, color=color)
if reflection is not None:
self.set_reflection_roughness(1 - reflection)
if opacity is not None:
self.set_opacity(opacity)
def set_opacity(self, value: float):
enable_opacity = value < 1
if self.shaders_list[0].GetInput("enable_opacity").Get() is None:
self.shaders_list[0].CreateInput("enable_opacity", Sdf.ValueTypeNames.Bool).Set(enable_opacity)
else:
self.shaders_list[0].GetInput("enable_opacity").Set(enable_opacity)
if self.shaders_list[0].GetInput("opacity_constant").Get() is None:
self.shaders_list[0].CreateInput("opacity_constant", Sdf.ValueTypeNames.Float).Set(value)
else:
self.shaders_list[0].GetInput("opacity_constant").Set(value)
def set_color(self, color) -> None:
super().set_color(np.array(color))
| 1,216 | Python | 39.566665 | 107 | 0.63898 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/utils/demo_util.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def initialize_demo(config, env, init_sim=True):
from omniisaacgymenvs.demos.anymal_terrain import AnymalTerrainDemo
# Mappings from strings to environments
task_map = {
"AnymalTerrain": AnymalTerrainDemo,
}
from omniisaacgymenvs.utils.config_utils.sim_config import SimConfig
sim_config = SimConfig(config)
cfg = sim_config.config
task = task_map[cfg["task_name"]](
name=cfg["task_name"], sim_config=sim_config, env=env
)
env.set_task(task=task, sim_params=sim_config.get_physics_params(), backend="torch", init_sim=init_sim)
return task | 2,167 | Python | 44.166666 | 107 | 0.757268 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/utils/runner.py | import os
import time
import torch
import shutil
import random
import numpy as np
from datetime import datetime
from utils.hydra_cfg.hydra_utils import *
from utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
from utils.wandb_logger import WandbLogger
class StopException(Exception):
pass
class _Runner:
def __init__(self):
pass
def init(self, cfg):
self.cfg_dict = omegaconf_to_dict(cfg)
self.test = cfg.test
self.checkpoint = cfg.checkpoint
self.clear_cmd()
self.task_name = cfg.task.name
self.start_time = datetime.now().strftime('%Y%m%d-%H%M%S')
# create save dir
self.save = cfg.save
self.run_name = self.start_time
self.task_dir = os.path.join('./runs', self.task_name)
if self.save:
self.run_dir = os.path.join(self.task_dir, self.run_name)
os.makedirs(self.run_dir, exist_ok=True)
# set seed
cfg.seed = 42
torch.manual_seed(cfg.seed)
torch.cuda.manual_seed_all(cfg.seed)
np.random.seed(cfg.seed)
random.seed(cfg.seed)
# logger
self.logger = WandbLogger(self.task_name, self.start_time, cfg.wandb)
# backup code
if self.save:
code_path = './learning'
if code_path is not None:
shutil.copytree(code_path, os.path.join(self.run_dir, 'codes'))
# dump config dict
if self.save:
with open(os.path.join(self.run_dir, 'config.yaml'), 'w') as f:
f.write(OmegaConf.to_yaml(cfg))
# get env & agent
from utils.task_util import get_env_agent
self.env, self.agent = get_env_agent(self.cfg_dict)
if self.test:
if self.checkpoint == '':
self.checkpoint = max(os.listdir(self.task_dir))
# load checkpoint
if self.checkpoint:
self.load_model(self.checkpoint)
if cfg.render:
self.write_cmd('render')
def run(self):
try:
if self.test:
self.agent.test()
else:
self.agent.train()
self.stop()
except StopException:
pass
def stop(self):
self.save_model('FinalEpoch')
self.logger.stop()
raise StopException
def read_cmd(self):
try:
with open('./controller', 'r') as f:
return f.read().rstrip()
except:
return ''
def write_cmd(self, cmd):
try:
with open('./controller', 'w') as f:
return f.write(cmd)
except:
pass
def clear_cmd(self):
open('./controller', 'w').close()
def close(self):
pass
def control(self):
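        # (Added comment) Commands are polled from the ./controller text file:
        # 'save' checkpoints the model, 'stop' saves and ends the run, 'record' triggers
        # env recording, 'close' stops and closes, and 'render' toggles env rendering on.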
cmd = self.read_cmd()
if cmd == 'save':
self.clear_cmd()
self.save_model(f'Epoch{self.agent.epoch_num}')
elif cmd == 'stop':
self.stop()
elif cmd == 'record':
self.clear_cmd()
self.env.record(f'Epoch{self.agent.epoch_num}')
elif cmd == 'close':
self.stop()
self.close()
self.env.render = cmd == 'render'
def get_save_dir(self, sub_dir, epoch_dir=False):
if epoch_dir:
save_dir = os.path.join(self.run_dir, sub_dir, f'Epoch{self.agent.epoch_num}')
else:
save_dir = os.path.join(self.run_dir, sub_dir)
os.makedirs(save_dir, exist_ok=True)
return save_dir
def save_model(self, name):
if self.save:
path = os.path.join(self.get_save_dir('model'), name)
torch.save({'agent': self.agent.save(), 'env': self.env.save()}, path)
print(f'Save model to {path}')
def load_model(self, name, epoch=None):
epoch = 'FinalEpoch' if epoch is None else f'Epoch{epoch}'
model_dir = os.path.join(self.task_dir, name, 'model', epoch)
datas = torch.load(model_dir)
self.agent.load(datas['agent'])
self.env.load(datas['env'])
Runner = _Runner()
| 4,120 | Python | 26.291391 | 90 | 0.546359 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/utils/rotation_utils.py | import sys
from torch.autograd import Variable
import torch.distributed.algorithms
sys.path.append('/home/hardy/.local/share/ov/pkg/isaac_sim-2022.2.1/exts/omni.isaac.core')
import numpy as np
from numpy import pi, sin, cos
import plotly.express as px
import plotly.io as pio
from utils.torch_utils import *
pio.renderers.default = "browser"
# auto-shaping: view the flat last dim as (-1, in_size) groups, apply func, then flatten back
def ash(func, x, in_size):
shape = x.shape[:-1]
return func(x.view(shape + (-1, in_size))).view(shape + (-1,))
@torch.jit.script
def normalize(x, eps: float = 1e-9):
return x / x.norm(p=2, dim=-1).clamp(min=eps, max=None).unsqueeze(-1)
@torch.jit.script
def normalize_angle(x):
return torch.atan2(torch.sin(x), torch.cos(x))
def rad2deg(radian_value, device=None):
return torch.rad2deg(radian_value).float().to(device)
def deg2rad(degree_value, device=None):
return torch.deg2rad(degree_value).float().to(device)
def zero_pos(shape, device=None):
return torch.zeros(to_torch_size(shape) + (3,), device=device)
def zero_pos_like(x):
return zero_pos(x.shape[:-1], x.device)
def full_pos(shape, value, device=None):
x = torch.zeros(to_torch_size(shape) + (3,), device=device)
x[:] = torch.tensor(value, device=device)
return x
def full_pos_like(x, value):
return full_pos(x.shape[:-1], value, x.device)
def identity_quat(shape, device=None):
q = torch.zeros(to_torch_size(shape) + (4,), device=device)
q[..., 0] = 1
return q
def identity_quat_like(x):
return identity_quat(x.shape[:-1], x.device)
@torch.jit.script
def quat_unit(a):
return normalize(a)
# @torch.jit.script
# def quat_mul_unnorm(a, b):
# shape = a.shape
# a = a.reshape(-1, 4)
# b = b.reshape(-1, 4)
#
# w1, x1, y1, z1 = a[:, 0], a[:, 1], a[:, 2], a[:, 3]
# w2, x2, y2, z2 = b[:, 0], b[:, 1], b[:, 2], b[:, 3]
# ww = (z1 + x1) * (x2 + y2)
# yy = (w1 - y1) * (w2 + z2)
# zz = (w1 + y1) * (w2 - z2)
# xx = ww + yy + zz
# qq = 0.5 * (xx + (z1 - x1) * (x2 - y2))
# w = qq - ww + (z1 - y1) * (y2 - z2)
# x = qq - xx + (x1 + w1) * (x2 + w2)
# y = qq - yy + (w1 - x1) * (y2 + z2)
# z = qq - zz + (z1 + y1) * (w2 - x2)
# quat = torch.stack([w, x, y, z], dim=-1).view(shape)
#
# return quat
# @torch.jit.script
# def quat_inverse(a):
# shape = a.shape
# a = a.reshape(-1, 4)
# return torch.cat((a[..., 0:1], -a[..., 1:]), dim=-1).view(shape)
@torch.jit.script
def quat_mul_unnorm(a, b):
w1, x1, y1, z1 = a[..., 0], a[..., 1], a[..., 2], a[..., 3]
w2, x2, y2, z2 = b[..., 0], b[..., 1], b[..., 2], b[..., 3]
ww = (z1 + x1) * (x2 + y2)
yy = (w1 - y1) * (w2 + z2)
zz = (w1 + y1) * (w2 - z2)
xx = ww + yy + zz
qq = 0.5 * (xx + (z1 - x1) * (x2 - y2))
w = qq - ww + (z1 - y1) * (y2 - z2)
x = qq - xx + (x1 + w1) * (x2 + w2)
y = qq - yy + (w1 - x1) * (y2 + z2)
z = qq - zz + (z1 + y1) * (w2 - x2)
quat = torch.stack([w, x, y, z], dim=-1)
return quat
@torch.jit.script
def quat_inverse(a):
a = a.clone()
a[..., 1:] *= -1
return a
@torch.jit.script
def quat_rotate(q, v):
q_w = q[..., 0:1]
q_vec = q[..., 1:]
a = v * (2.0 * q_w ** 2 - 1.0)
b = torch.cross(q_vec, v, dim=-1) * q_w * 2.0
c = q_vec * torch.sum(q_vec * v, dim=-1, keepdim=True) * 2.0
return a + b + c
@torch.jit.script
def quat_rotate_inverse(q, v):
q_w = q[..., 0].unsqueeze(-1)
q_vec = q[..., 1:]
a = v * (2.0 * q_w ** 2 - 1.0)
b = torch.cross(q_vec, v, dim=-1) * q_w * 2.0
c = q_vec * torch.sum(q_vec * v, dim=-1, keepdim=True) * 2.0
return a - b + c
@torch.jit.script
def quat_mul(q0, q1):
return quat_unit(quat_mul_unnorm(q0, q1))
@torch.jit.script
def quat_div(x, y):
return quat_mul(quat_inverse(y), x)
@torch.jit.script
def quat_diff_rad(a, b):
eps = 1e-5
b_conj = quat_inverse(b)
mul = quat_mul_unnorm(a, b_conj)
# 2 * torch.acos(torch.abs(mul[..., -1]))
return 2.0 * torch.asin(torch.clamp(torch.norm(mul[..., 1:], p=2, dim=-1), max=1-eps, min=eps-1))
@torch.jit.script
def quat_to_angle_axis(q):
# computes axis-angle representation from quaternion q
# q must be normalized
min_theta = 1e-5
qw, qx, qy, qz = 0, 1, 2, 3
sin_theta = torch.sqrt(1 - q[..., qw] * q[..., qw])
angle = 2 * torch.acos(q[..., qw])
angle = normalize_angle(angle)
sin_theta_expand = sin_theta.unsqueeze(-1)
axis = q[..., qx:] / sin_theta_expand
mask = sin_theta > min_theta
default_axis = torch.zeros_like(axis)
default_axis[..., qw] = 1
angle = torch.where(mask, angle, torch.zeros_like(angle))
mask_expand = mask.unsqueeze(-1)
axis = torch.where(mask_expand, axis, default_axis)
return angle, axis
@torch.jit.script
def quat_from_angle_axis(angle, axis):
theta = (angle / 2).unsqueeze(-1)
xyz = normalize(axis) * theta.sin()
w = theta.cos()
return quat_unit(torch.cat([w, xyz], dim=-1))
@torch.jit.script
def angle_axis_to_exp_map(angle, axis):
# compute exponential map from axis-angle
angle_expand = angle.unsqueeze(-1)
exp_map = angle_expand * axis
return exp_map
@torch.jit.script
def quat_to_exp_map(q):
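    # (Added comment) Exponential-map (axis-angle) vector angle * axis; qw is clamped
    # away from +/-1 and the angle wrapped to (-pi, pi) for numerical stability.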
eps = 1e-5
qw = q[..., 0, None].clamp(-1+eps, 1-eps)
q_axis = q[..., 1:]
angle = normalize_angle(2 * qw.acos())
axis = q_axis / torch.sqrt(1 - qw ** 2)
return angle * axis
# @torch.jit.script
# def quat_to_exp_map(q):
# # compute exponential map from quaternion
# # q must be normalized
# angle, axis = quat_to_angle_axis(q)
# exp_map = angle_axis_to_exp_map(angle, axis)
# return exp_map
# @torch.jit.script
# def exp_map_to_angle_axis(exp_map):
# min_theta = 1e-5
#
# angle = torch.norm(exp_map, dim=-1)
# angle_exp = torch.unsqueeze(angle, dim=-1)
# axis = exp_map / angle_exp
# angle = normalize_angle(angle)
#
# default_axis = torch.zeros_like(exp_map)
# default_axis[..., -1] = 1
#
# mask = angle > min_theta
# angle = torch.where(mask, angle, torch.zeros_like(angle))
# mask_expand = mask.unsqueeze(-1)
# axis = torch.where(mask_expand, axis, default_axis)
#
# return angle, axis
# @torch.jit.script
# def exp_map_to_quat(exp_map):
# angle, axis = exp_map_to_angle_axis(exp_map)
# q = quat_from_angle_axis(angle, axis)
# return q
@torch.jit.script
def exp_map_to_quat(exp_map):
eps = 1e-12
angle = torch.norm(exp_map, dim=-1, keepdim=True)
axis = exp_map / (angle + eps)
theta = normalize_angle(angle) / 2
xyz = normalize(axis) * theta.sin()
w = theta.cos()
return quat_unit(torch.cat([w, xyz], dim=-1))
@torch.jit.script
def quat_to_tan_norm(q):
# represents a rotation using the tangent and normal vectors
ref_tan = torch.zeros_like(q[..., 0:3])
ref_tan[..., 0] = 1
tan = quat_rotate(q, ref_tan)
ref_norm = torch.zeros_like(q[..., 0:3])
ref_norm[..., -1] = 1
norm = quat_rotate(q, ref_norm)
norm_tan = torch.cat([tan, norm], dim=len(tan.shape) - 1)
return norm_tan
@torch.jit.script
def quat_from_rotation_matrix(m):
m = m.unsqueeze(0)
diag0 = m[..., 0, 0]
diag1 = m[..., 1, 1]
diag2 = m[..., 2, 2]
    # Candidate component magnitudes recovered from combinations of the diagonal (trace).
w = (((diag0 + diag1 + diag2 + 1.0) / 4.0).clamp(0.0, None)) ** 0.5
x = (((diag0 - diag1 - diag2 + 1.0) / 4.0).clamp(0.0, None)) ** 0.5
y = (((-diag0 + diag1 - diag2 + 1.0) / 4.0).clamp(0.0, None)) ** 0.5
z = (((-diag0 - diag1 + diag2 + 1.0) / 4.0).clamp(0.0, None)) ** 0.5
# Only modify quaternions where w > x, y, z.
c0 = (w >= x) & (w >= y) & (w >= z)
x[c0] *= (m[..., 2, 1][c0] - m[..., 1, 2][c0]).sign()
y[c0] *= (m[..., 0, 2][c0] - m[..., 2, 0][c0]).sign()
z[c0] *= (m[..., 1, 0][c0] - m[..., 0, 1][c0]).sign()
# Only modify quaternions where x > w, y, z
c1 = (x >= w) & (x >= y) & (x >= z)
w[c1] *= (m[..., 2, 1][c1] - m[..., 1, 2][c1]).sign()
y[c1] *= (m[..., 1, 0][c1] + m[..., 0, 1][c1]).sign()
z[c1] *= (m[..., 0, 2][c1] + m[..., 2, 0][c1]).sign()
# Only modify quaternions where y > w, x, z.
c2 = (y >= w) & (y >= x) & (y >= z)
w[c2] *= (m[..., 0, 2][c2] - m[..., 2, 0][c2]).sign()
x[c2] *= (m[..., 1, 0][c2] + m[..., 0, 1][c2]).sign()
z[c2] *= (m[..., 2, 1][c2] + m[..., 1, 2][c2]).sign()
# Only modify quaternions where z > w, x, y.
c3 = (z >= w) & (z >= x) & (z >= y)
w[c3] *= (m[..., 1, 0][c3] - m[..., 0, 1][c3]).sign()
x[c3] *= (m[..., 2, 0][c3] + m[..., 0, 2][c3]).sign()
y[c3] *= (m[..., 2, 1][c3] + m[..., 1, 2][c3]).sign()
return quat_unit(torch.stack([w, x, y, z], dim=-1)).squeeze(0)
@torch.jit.script
def quat_from_dir(v):
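    # (Added comment) Quaternion rotating the world z-axis onto direction v, built as
    # q = [|u||v| + u.v, u x v] with u = (0, 0, 1); near-degenerate (anti-parallel)
    # inputs fall back to a 180-degree rotation about the x-axis.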
u = torch.zeros_like(v)
u[..., 2] = 1
xyz = torch.cross(u, v, dim=-1)
w = torch.sqrt((u ** 2).sum(-1) * (v ** 2).sum(-1)) + (u * v).sum(-1)
q = quat_unit(torch.cat([w.unsqueeze(-1), xyz], dim=-1))
q[q.abs().sum(-1) < 1e-6, [1]] = 1
return q
@torch.jit.script
def exp_map_mul(e0, e1):
shape = e0.shape[:-1] + (-1,)
q0 = exp_map_to_quat(e0.reshape(-1, 3))
q1 = exp_map_to_quat(e1.reshape(-1, 3))
return quat_to_exp_map(quat_mul(q0, q1)).view(shape)
@torch.jit.script
def exp_map_div(e0, e1):
shape = e0.shape[:-1] + (-1,)
q0 = exp_map_to_quat(e0.reshape(-1, 3))
q1 = exp_map_to_quat(e1.reshape(-1, 3))
return quat_to_exp_map(quat_div(q0, q1)).view(shape)
@torch.jit.script
def exp_map_diff_rad(e0, e1):
return quat_diff_rad(exp_map_to_quat(e0), exp_map_to_quat(e1))
@torch.jit.script
def lerp(p0, p1, t):
return (1 - t) * p0 + t * p1
# @torch.jit.script
def slerp(q0, q1, t):
qw, qx, qy, qz = 0, 1, 2, 3
cos_half_theta = q0[..., qw] * q1[..., qw] \
+ q0[..., qx] * q1[..., qx] \
+ q0[..., qy] * q1[..., qy] \
+ q0[..., qz] * q1[..., qz]
neg_mask = cos_half_theta < 0
q1 = q1.clone()
q1[neg_mask] = -q1[neg_mask]
cos_half_theta = torch.abs(cos_half_theta)
cos_half_theta = torch.unsqueeze(cos_half_theta, dim=-1)
half_theta = torch.acos(cos_half_theta)
sin_half_theta = torch.sqrt(1.0 - cos_half_theta * cos_half_theta)
ratioA = torch.sin((1 - t) * half_theta) / sin_half_theta
ratioB = torch.sin(t * half_theta) / sin_half_theta
new_q_w = ratioA * q0[..., qw:qw + 1] + ratioB * q1[..., qw:qw + 1]
new_q_x = ratioA * q0[..., qx:qx + 1] + ratioB * q1[..., qx:qx + 1]
new_q_y = ratioA * q0[..., qy:qy + 1] + ratioB * q1[..., qy:qy + 1]
new_q_z = ratioA * q0[..., qz:qz + 1] + ratioB * q1[..., qz:qz + 1]
cat_dim = len(new_q_w.shape) - 1
new_q = torch.cat([new_q_w, new_q_x, new_q_y, new_q_z], dim=cat_dim)
new_q = torch.where(torch.abs(sin_half_theta) < 0.001, 0.5 * q0 + 0.5 * q1, new_q)
new_q = torch.where(torch.abs(cos_half_theta) >= 1, q0, new_q)
return new_q
@torch.jit.script
def calc_heading(q):
# calculate heading direction from quaternion
# the heading is the direction on the xy plane
# q must be normalized
ref_dir = torch.zeros_like(q[..., 0:3])
ref_dir[..., 0] = 1
rot_dir = quat_rotate(q, ref_dir)
heading = torch.atan2(rot_dir[..., 1], rot_dir[..., 0])
return heading
@torch.jit.script
def calc_heading_quat(q):
# calculate heading rotation from quaternion
# the heading is the direction on the xy plane
# q must be normalized
heading = calc_heading(q)
axis = torch.zeros_like(q[..., 0:3])
axis[..., 2] = 1
heading_q = quat_from_angle_axis(heading, axis)
return heading_q
@torch.jit.script
def calc_heading_quat_inv(q):
# calculate heading rotation from quaternion
# the heading is the direction on the xy plane
# q must be normalized
heading = calc_heading(q)
axis = torch.zeros_like(q[..., 0:3])
axis[..., 2] = 1
heading_q = quat_from_angle_axis(-heading, axis)
return heading_q
@torch.jit.script
def normalize_pos(pos):
z = torch.zeros_like(pos)
z[..., 2] = 1
return z * pos.norm(p=2, dim=-1, keepdim=True)
def draw_exp_map(e):
draw_quaternion(exp_map_to_quat(e))
def draw_quaternion(q):
v = torch.Tensor([0, 0, 1]).repeat(len(q), 1)
v = quat_rotate(q, v)
fig = px.scatter_3d(x=v[:, 0], y=v[:, 1], z=v[:, 2])
fig.update_layout(
scene=dict(
xaxis=dict(range=[-1, 1]),
yaxis=dict(range=[-1, 1]),
zaxis=dict(range=[-1, 1]),
)
)
fig.update_scenes(aspectmode='cube')
fig_add_sphere(fig)
fig.show()
def random_quaternion(size):
return exp_map_to_quat((torch.rand([size, 3]) - 0.5) * 2 * torch.pi)
def fig_add_sphere(fig):
theta = np.linspace(0, 2 * pi, 120)
phi = np.linspace(0, pi, 60)
u, v = np.meshgrid(theta, phi)
xs = cos(u) * sin(v)
ys = sin(u) * sin(v)
zs = cos(v)
x, y, z = [], [], []
for t in [theta[10 * k] for k in range(12)]: # meridians:
x.extend(list(cos(t) * sin(phi)) + [None]) # None is inserted to mark the end of a meridian line
y.extend(list(sin(t) * sin(phi)) + [None])
z.extend(list(cos(phi)) + [None])
for s in [phi[6 * k] for k in range(10)]: # parallels
x.extend(list(cos(theta) * sin(s)) + [None]) # None is inserted to mark the end of a parallel line
y.extend(list(sin(theta) * sin(s)) + [None])
z.extend([cos(s)] * 120 + [None])
fig.add_surface(x=xs, y=ys, z=zs,
colorscale=[[0, '#ffffff'], [1, '#ffffff']],
showscale=False, opacity=0.5) # or opacity=1
fig.add_scatter3d(x=x, y=y, z=z, mode='lines', line_width=3, line_color='rgb(10,10,10)')
def _test_exp_map_diff_rad_grad():
n = 10000
print('testing...')
for _ in range(1000):
x = Variable(torch.rand([n, 3]) * 1000, requires_grad=True)
y = exp_map_diff_rad(x, torch.rand([n, 3])).mean()
y.backward()
if x.grad.isnan().any():
print(y)
print('finish')
def _test_exp_map_to_quat_grad():
n = 10000
print('testing...')
for _ in range(1):
x = Variable(torch.rand([n, 3]) * 1000, requires_grad=True)
y = exp_map_to_quat(x).mean()
y.backward()
print(x.grad)
# if x.grad.isnan().any():
# print(y)
print('finish')
def _test_quat_to_exp_map_grad():
n = 10000
print('testing...')
for _ in range(1):
x = Variable(torch.rand([n, 3]), requires_grad=True)
y = exp_map_to_quat(x)
y = quat_to_exp_map(y)
y.mean().backward()
print((y - x).sum())
print(x.grad)
# if x.grad.isnan().any():
# print(y)
print('finish')
def _test_slerp():
n = 15
q0 = random_quaternion(1).repeat(n, 1)
q1 = random_quaternion(1).repeat(n, 1)
t = torch.arange(n).float() / n
q = slerp(q0, q1, t.unsqueeze(-1))
draw_quaternion(q)
if __name__ == '__main__':
_test_quat_to_exp_map_grad()
| 15,284 | Python | 26.84153 | 107 | 0.537817 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/utils/control_panel.py | import omni.ui as ui
def _preproc_kwargs(kwargs):
for k in kwargs.keys():
if k in ['width', 'height']:
kwargs[k] = ui.Length(kwargs[k])
return kwargs
class ControlPanel:
def __init__(self, name):
self._window = ui.Window(name, auto_resize=True)
self._components = dict()
def __getitem__(self, name):
if isinstance(name, (list, tuple)):
return [self.__getitem__(x) for x in name]
item = self._components.get(name)
if isinstance(item, ui.FloatSlider):
return item.model.get_value_as_float()
elif isinstance(item, ui.CheckBox):
return item.model.get_value_as_bool()
else:
raise IndexError
def __setitem__(self, key, value):
if isinstance(key, (list, tuple)):
for k, v in zip(key, value):
self.__setitem__(k, v)
return
item = self._components.get(key)
if isinstance(item, ui.FloatField):
item.model.set_value(value)
else:
raise IndexError
def add_slider(self, name, **kwargs):
self._components[name] = lambda: ui.FloatSlider(**_preproc_kwargs(kwargs))
def add_float(self, name, **kwargs):
self._components[name] = lambda: ui.FloatField(**_preproc_kwargs(kwargs))
def add_check_box(self, name, **kwargs):
self._components[name] = lambda: ui.CheckBox(**_preproc_kwargs(kwargs))
def build(self):
with self._window.frame:
with ui.VStack(width=150):
for k, v in self._components.items():
ui.Label(k)
self._components[k] = v()
| 1,685 | Python | 29.654545 | 82 | 0.558457 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/utils/task_util.py | from env import ForexEnv
from learning.ppo_agent import PPOAgent
from learning.pg_agent import PGAgent
def get_env_agent(config):
env_map = {
'Noob': ForexEnv,
}
agent_map = {
'PPOAgent': PPOAgent,
'PGAgent': PGAgent
}
env = env_map[config['task']['name']](config)
agent = agent_map[config['train']['name']](params=config['train']['params'], env=env)
return env, agent
| 426 | Python | 20.349999 | 89 | 0.617371 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/utils/domain_randomization/randomize.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy
import omni
import omni.replicator.core as rep
import omni.replicator.isaac as dr
import numpy as np
import torch
from omni.isaac.core.prims import RigidPrimView
class Randomizer():
def __init__(self, sim_config):
self._cfg = sim_config.task_config
self._config = sim_config.config
self.randomize = False
dr_config = self._cfg.get("domain_randomization", None)
self.distributions = dict()
self.active_domain_randomizations = dict()
self._observations_dr_params = None
self._actions_dr_params = None
if dr_config is not None:
randomize = dr_config.get("randomize", False)
randomization_params = dr_config.get("randomization_params", None)
if randomize and randomization_params is not None:
self.randomize = True
self.min_frequency = dr_config.get("min_frequency", 1)
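    # (Added note) The randomization_params structure consumed by the methods below follows
    # the OmniIsaacGymEnvs convention; an illustrative (assumed, not verified) task-config
    # fragment:
    #   domain_randomization:
    #     randomize: True
    #     randomization_params:
    #       rigid_prim_views:
    #         object_view:
    #           mass:
    #             on_startup:
    #               operation: scaling
    #               distribution: uniform
    #               distribution_parameters: [0.8, 1.2]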
def apply_on_startup_domain_randomization(self, task):
if self.randomize:
torch.manual_seed(self._config["seed"])
randomization_params = self._cfg["domain_randomization"]["randomization_params"]
for opt in randomization_params.keys():
if opt == "rigid_prim_views":
if randomization_params["rigid_prim_views"] is not None:
for view_name in randomization_params["rigid_prim_views"].keys():
if randomization_params["rigid_prim_views"][view_name] is not None:
for attribute, params in randomization_params["rigid_prim_views"][view_name].items():
params = randomization_params["rigid_prim_views"][view_name][attribute]
if attribute in ["scale", "mass", "density"] and params is not None:
if "on_startup" in params.keys():
if not set(('operation','distribution', 'distribution_parameters')).issubset(params["on_startup"]):
raise ValueError(f"Please ensure the following randomization parameters for {view_name} {attribute} " + \
"on_startup are provided: operation, distribution, distribution_parameters.")
view = task._env._world.scene._scene_registry.rigid_prim_views[view_name]
if attribute == "scale":
self.randomize_scale_on_startup(
view=view,
distribution=params["on_startup"]["distribution"],
distribution_parameters=params["on_startup"]["distribution_parameters"],
operation=params["on_startup"]["operation"],
sync_dim_noise=True,
)
elif attribute == "mass":
self.randomize_mass_on_startup(
view=view,
distribution=params["on_startup"]["distribution"],
distribution_parameters=params["on_startup"]["distribution_parameters"],
operation=params["on_startup"]["operation"],
)
elif attribute == "density":
self.randomize_density_on_startup(
view=view,
distribution=params["on_startup"]["distribution"],
distribution_parameters=params["on_startup"]["distribution_parameters"],
operation=params["on_startup"]["operation"],
)
if opt == "articulation_views":
if randomization_params["articulation_views"] is not None:
for view_name in randomization_params["articulation_views"].keys():
if randomization_params["articulation_views"][view_name] is not None:
for attribute, params in randomization_params["articulation_views"][view_name].items():
params = randomization_params["articulation_views"][view_name][attribute]
if attribute in ["scale"] and params is not None:
if "on_startup" in params.keys():
if not set(('operation','distribution', 'distribution_parameters')).issubset(params["on_startup"]):
raise ValueError(f"Please ensure the following randomization parameters for {view_name} {attribute} " + \
"on_startup are provided: operation, distribution, distribution_parameters.")
view = task._env._world.scene._scene_registry.articulated_views[view_name]
if attribute == "scale":
self.randomize_scale_on_startup(
view=view,
distribution=params["on_startup"]["distribution"],
distribution_parameters=params["on_startup"]["distribution_parameters"],
operation=params["on_startup"]["operation"],
sync_dim_noise=True
)
else:
dr_config = self._cfg.get("domain_randomization", None)
if dr_config is None:
raise ValueError("No domain randomization parameters are specified in the task yaml config file")
randomize = dr_config.get("randomize", False)
randomization_params = dr_config.get("randomization_params", None)
if randomize == False or randomization_params is None:
print("On Startup Domain randomization will not be applied.")
def set_up_domain_randomization(self, task):
if self.randomize:
randomization_params = self._cfg["domain_randomization"]["randomization_params"]
rep.set_global_seed(self._config["seed"])
with dr.trigger.on_rl_frame(num_envs=self._cfg["env"]["numEnvs"]):
for opt in randomization_params.keys():
if opt == "observations":
self._set_up_observations_randomization(task)
elif opt == "actions":
self._set_up_actions_randomization(task)
elif opt == "simulation":
if randomization_params["simulation"] is not None:
self.distributions["simulation"] = dict()
dr.physics_view.register_simulation_context(task._env._world)
for attribute, params in randomization_params["simulation"].items():
self._set_up_simulation_randomization(attribute, params)
elif opt == "rigid_prim_views":
if randomization_params["rigid_prim_views"] is not None:
self.distributions["rigid_prim_views"] = dict()
for view_name in randomization_params["rigid_prim_views"].keys():
if randomization_params["rigid_prim_views"][view_name] is not None:
self.distributions["rigid_prim_views"][view_name] = dict()
dr.physics_view.register_rigid_prim_view(
rigid_prim_view=task._env._world.scene._scene_registry.rigid_prim_views[view_name],
)
for attribute, params in randomization_params["rigid_prim_views"][view_name].items():
if attribute not in ["scale", "density"]:
self._set_up_rigid_prim_view_randomization(view_name, attribute, params)
elif opt == "articulation_views":
if randomization_params["articulation_views"] is not None:
self.distributions["articulation_views"] = dict()
for view_name in randomization_params["articulation_views"].keys():
if randomization_params["articulation_views"][view_name] is not None:
self.distributions["articulation_views"][view_name] = dict()
dr.physics_view.register_articulation_view(
articulation_view=task._env._world.scene._scene_registry.articulated_views[view_name],
)
for attribute, params in randomization_params["articulation_views"][view_name].items():
if attribute not in ["scale"]:
self._set_up_articulation_view_randomization(view_name, attribute, params)
rep.orchestrator.run()
else:
dr_config = self._cfg.get("domain_randomization", None)
if dr_config is None:
raise ValueError("No domain randomization parameters are specified in the task yaml config file")
randomize = dr_config.get("randomize", False)
randomization_params = dr_config.get("randomization_params", None)
if randomize == False or randomization_params is None:
print("Domain randomization will not be applied.")
def _set_up_observations_randomization(self, task):
task.randomize_observations = True
self._observations_dr_params = self._cfg["domain_randomization"]["randomization_params"]["observations"]
if self._observations_dr_params is None:
raise ValueError(f"Observations randomization parameters are not provided.")
if "on_reset" in self._observations_dr_params.keys():
if not set(('operation','distribution', 'distribution_parameters')).issubset(self._observations_dr_params["on_reset"].keys()):
raise ValueError(f"Please ensure the following observations on_reset randomization parameters are provided: " + \
"operation, distribution, distribution_parameters.")
self.active_domain_randomizations[("observations", "on_reset")] = np.array(self._observations_dr_params["on_reset"]["distribution_parameters"])
if "on_interval" in self._observations_dr_params.keys():
if not set(('frequency_interval', 'operation','distribution', 'distribution_parameters')).issubset(self._observations_dr_params["on_interval"].keys()):
raise ValueError(f"Please ensure the following observations on_interval randomization parameters are provided: " + \
"frequency_interval, operation, distribution, distribution_parameters.")
self.active_domain_randomizations[("observations", "on_interval")] = np.array(self._observations_dr_params["on_interval"]["distribution_parameters"])
self._observations_counter_buffer = torch.zeros((self._cfg["env"]["numEnvs"]), dtype=torch.int, device=self._config["rl_device"])
self._observations_correlated_noise = torch.zeros((self._cfg["env"]["numEnvs"], task.num_observations), device=self._config["rl_device"])
def _set_up_actions_randomization(self, task):
task.randomize_actions = True
self._actions_dr_params = self._cfg["domain_randomization"]["randomization_params"]["actions"]
if self._actions_dr_params is None:
raise ValueError(f"Actions randomization parameters are not provided.")
if "on_reset" in self._actions_dr_params.keys():
if not set(('operation','distribution', 'distribution_parameters')).issubset(self._actions_dr_params["on_reset"].keys()):
raise ValueError(f"Please ensure the following actions on_reset randomization parameters are provided: " + \
"operation, distribution, distribution_parameters.")
self.active_domain_randomizations[("actions", "on_reset")] = np.array(self._actions_dr_params["on_reset"]["distribution_parameters"])
if "on_interval" in self._actions_dr_params.keys():
if not set(('frequency_interval', 'operation','distribution', 'distribution_parameters')).issubset(self._actions_dr_params["on_interval"].keys()):
raise ValueError(f"Please ensure the following actions on_interval randomization parameters are provided: " + \
"frequency_interval, operation, distribution, distribution_parameters.")
self.active_domain_randomizations[("actions", "on_interval")] = np.array(self._actions_dr_params["on_interval"]["distribution_parameters"])
self._actions_counter_buffer = torch.zeros((self._cfg["env"]["numEnvs"]), dtype=torch.int, device=self._config["rl_device"])
self._actions_correlated_noise = torch.zeros((self._cfg["env"]["numEnvs"], task.num_actions), device=self._config["rl_device"])
def apply_observations_randomization(self, observations, reset_buf):
env_ids = reset_buf.nonzero(as_tuple=False).squeeze(-1)
self._observations_counter_buffer[env_ids] = 0
self._observations_counter_buffer += 1
if "on_reset" in self._observations_dr_params.keys():
observations[:] = self._apply_correlated_noise(
buffer_type="observations",
buffer=observations,
reset_ids=env_ids,
operation=self._observations_dr_params["on_reset"]["operation"],
distribution=self._observations_dr_params["on_reset"]["distribution"],
distribution_parameters=self._observations_dr_params["on_reset"]["distribution_parameters"],
)
if "on_interval" in self._observations_dr_params.keys():
randomize_ids = (self._observations_counter_buffer >= self._observations_dr_params["on_interval"]["frequency_interval"]).nonzero(as_tuple=False).squeeze(-1)
self._observations_counter_buffer[randomize_ids] = 0
observations[:] = self._apply_uncorrelated_noise(
buffer=observations,
randomize_ids=randomize_ids,
operation=self._observations_dr_params["on_interval"]["operation"],
distribution=self._observations_dr_params["on_interval"]["distribution"],
distribution_parameters=self._observations_dr_params["on_interval"]["distribution_parameters"],
)
return observations
def apply_actions_randomization(self, actions, reset_buf):
env_ids = reset_buf.nonzero(as_tuple=False).squeeze(-1)
self._actions_counter_buffer[env_ids] = 0
self._actions_counter_buffer += 1
if "on_reset" in self._actions_dr_params.keys():
actions[:] = self._apply_correlated_noise(
buffer_type="actions",
buffer=actions,
reset_ids=env_ids,
operation=self._actions_dr_params["on_reset"]["operation"],
distribution=self._actions_dr_params["on_reset"]["distribution"],
distribution_parameters=self._actions_dr_params["on_reset"]["distribution_parameters"],
)
if "on_interval" in self._actions_dr_params.keys():
randomize_ids = (self._actions_counter_buffer >= self._actions_dr_params["on_interval"]["frequency_interval"]).nonzero(as_tuple=False).squeeze(-1)
self._actions_counter_buffer[randomize_ids] = 0
actions[:] = self._apply_uncorrelated_noise(
buffer=actions,
randomize_ids=randomize_ids,
operation=self._actions_dr_params["on_interval"]["operation"],
distribution=self._actions_dr_params["on_interval"]["distribution"],
distribution_parameters=self._actions_dr_params["on_interval"]["distribution_parameters"],
)
return actions
def _apply_uncorrelated_noise(self, buffer, randomize_ids, operation, distribution, distribution_parameters):
if distribution == "gaussian" or distribution == "normal":
noise = torch.normal(mean=distribution_parameters[0], std=distribution_parameters[1], size=(len(randomize_ids), buffer.shape[1]), device=self._config["rl_device"])
elif distribution == "uniform":
noise = (distribution_parameters[1] - distribution_parameters[0]) * torch.rand((len(randomize_ids), buffer.shape[1]), device=self._config["rl_device"]) + distribution_parameters[0]
elif distribution == "loguniform" or distribution == "log_uniform":
noise = torch.exp((np.log(distribution_parameters[1]) - np.log(distribution_parameters[0])) * torch.rand((len(randomize_ids), buffer.shape[1]), device=self._config["rl_device"]) + np.log(distribution_parameters[0]))
else:
print(f"The specified {distribution} distribution is not supported.")
if operation == "additive":
buffer[randomize_ids] += noise
elif operation == "scaling":
buffer[randomize_ids] *= noise
else:
print(f"The specified {operation} operation type is not supported.")
return buffer
def _apply_correlated_noise(self, buffer_type, buffer, reset_ids, operation, distribution, distribution_parameters):
if buffer_type == "observations":
correlated_noise_buffer = self._observations_correlated_noise
elif buffer_type == "actions":
correlated_noise_buffer = self._actions_correlated_noise
if len(reset_ids) > 0:
if distribution == "gaussian" or distribution == "normal":
correlated_noise_buffer[reset_ids] = torch.normal(mean=distribution_parameters[0], std=distribution_parameters[1], size=(len(reset_ids), buffer.shape[1]), device=self._config["rl_device"])
elif distribution == "uniform":
correlated_noise_buffer[reset_ids] = (distribution_parameters[1] - distribution_parameters[0]) * torch.rand((len(reset_ids), buffer.shape[1]), device=self._config["rl_device"]) + distribution_parameters[0]
elif distribution == "loguniform" or distribution == "log_uniform":
correlated_noise_buffer[reset_ids] = torch.exp((np.log(distribution_parameters[1]) - np.log(distribution_parameters[0])) * torch.rand((len(reset_ids), buffer.shape[1]), device=self._config["rl_device"]) + np.log(distribution_parameters[0]))
else:
print(f"The specified {distribution} distribution is not supported.")
if operation == "additive":
buffer += correlated_noise_buffer
elif operation == "scaling":
buffer *= correlated_noise_buffer
else:
print(f"The specified {operation} operation type is not supported.")
return buffer
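    # A minimal sketch (assuming a task whose yaml enables both "on_reset" and
    # "on_interval" observation noise) of how the two helpers above cooperate
    # each step:
    #
    #   obs = randomizer.apply_observations_randomization(obs, reset_buf)
    #
    # _apply_correlated_noise redraws a per-env offset only for the envs flagged
    # in reset_buf and keeps it fixed for the rest of the episode, while
    # _apply_uncorrelated_noise redraws fresh noise whenever an env's counter
    # reaches frequency_interval. In both cases "additive" adds the noise to the
    # buffer and "scaling" multiplies the buffer by it.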
def _set_up_simulation_randomization(self, attribute, params):
if params is None:
raise ValueError(f"Randomization parameters for simulation {attribute} is not provided.")
if attribute in dr.SIMULATION_CONTEXT_ATTRIBUTES:
self.distributions["simulation"][attribute] = dict()
if "on_reset" in params.keys():
if not set(('operation','distribution', 'distribution_parameters')).issubset(params["on_reset"]):
raise ValueError(f"Please ensure the following randomization parameters for simulation {attribute} on_reset are provided: " + \
"operation, distribution, distribution_parameters.")
self.active_domain_randomizations[("simulation", attribute, "on_reset")] = np.array(params["on_reset"]["distribution_parameters"])
kwargs = {"operation": params["on_reset"]["operation"]}
self.distributions["simulation"][attribute]["on_reset"] = self._generate_distribution(
dimension=dr.physics_view._simulation_context_initial_values[attribute].shape[0],
view_name="simulation",
attribute=attribute,
params=params["on_reset"],
)
kwargs[attribute] = self.distributions["simulation"][attribute]["on_reset"]
with dr.gate.on_env_reset():
dr.physics_view.randomize_simulation_context(**kwargs)
if "on_interval" in params.keys():
if not set(('frequency_interval', 'operation','distribution', 'distribution_parameters')).issubset(params["on_interval"]):
raise ValueError(f"Please ensure the following randomization parameters for simulation {attribute} on_interval are provided: " + \
"frequency_interval, operation, distribution, distribution_parameters.")
self.active_domain_randomizations[("simulation", attribute, "on_interval")] = np.array(params["on_interval"]["distribution_parameters"])
kwargs = {"operation": params["on_interval"]["operation"]}
self.distributions["simulation"][attribute]["on_interval"] = self._generate_distribution(
dimension=dr.physics_view._simulation_context_initial_values[attribute].shape[0],
view_name="simulation",
attribute=attribute,
params=params["on_interval"],
)
kwargs[attribute] = self.distributions["simulation"][attribute]["on_interval"]
with dr.gate.on_interval(interval=params["on_interval"]["frequency_interval"]):
dr.physics_view.randomize_simulation_context(**kwargs)
def _set_up_rigid_prim_view_randomization(self, view_name, attribute, params):
if params is None:
raise ValueError(f"Randomization parameters for rigid prim view {view_name} {attribute} is not provided.")
if attribute in dr.RIGID_PRIM_ATTRIBUTES:
self.distributions["rigid_prim_views"][view_name][attribute] = dict()
if "on_reset" in params.keys():
if not set(('operation','distribution', 'distribution_parameters')).issubset(params["on_reset"]):
raise ValueError(f"Please ensure the following randomization parameters for {view_name} {attribute} on_reset are provided: " + \
"operation, distribution, distribution_parameters.")
self.active_domain_randomizations[("rigid_prim_views", view_name, attribute, "on_reset")] = np.array(params["on_reset"]["distribution_parameters"])
kwargs = {"view_name": view_name, "operation": params["on_reset"]["operation"]}
if attribute == "material_properties" and "num_buckets" in params["on_reset"].keys():
kwargs["num_buckets"] = params["on_reset"]["num_buckets"]
self.distributions["rigid_prim_views"][view_name][attribute]["on_reset"] = self._generate_distribution(
dimension=dr.physics_view._rigid_prim_views_initial_values[view_name][attribute].shape[1],
view_name=view_name,
attribute=attribute,
params=params["on_reset"],
)
kwargs[attribute] = self.distributions["rigid_prim_views"][view_name][attribute]["on_reset"]
with dr.gate.on_env_reset():
dr.physics_view.randomize_rigid_prim_view(**kwargs)
if "on_interval" in params.keys():
if not set(('frequency_interval', 'operation','distribution', 'distribution_parameters')).issubset(params["on_interval"]):
raise ValueError(f"Please ensure the following randomization parameters for {view_name} {attribute} on_interval are provided: " + \
"frequency_interval, operation, distribution, distribution_parameters.")
self.active_domain_randomizations[("rigid_prim_views", view_name, attribute, "on_interval")] = np.array(params["on_interval"]["distribution_parameters"])
kwargs = {"view_name": view_name, "operation": params["on_interval"]["operation"]}
if attribute == "material_properties" and "num_buckets" in params["on_interval"].keys():
kwargs["num_buckets"] = params["on_interval"]["num_buckets"]
self.distributions["rigid_prim_views"][view_name][attribute]["on_interval"] = self._generate_distribution(
dimension=dr.physics_view._rigid_prim_views_initial_values[view_name][attribute].shape[1],
view_name=view_name,
attribute=attribute,
params=params["on_interval"],
)
kwargs[attribute] = self.distributions["rigid_prim_views"][view_name][attribute]["on_interval"]
with dr.gate.on_interval(interval=params["on_interval"]["frequency_interval"]):
dr.physics_view.randomize_rigid_prim_view(**kwargs)
else:
raise ValueError(f"The attribute {attribute} for {view_name} is invalid for domain randomization.")
def _set_up_articulation_view_randomization(self, view_name, attribute, params):
if params is None:
raise ValueError(f"Randomization parameters for articulation view {view_name} {attribute} is not provided.")
if attribute in dr.ARTICULATION_ATTRIBUTES:
self.distributions["articulation_views"][view_name][attribute] = dict()
if "on_reset" in params.keys():
if not set(('operation','distribution', 'distribution_parameters')).issubset(params["on_reset"]):
raise ValueError(f"Please ensure the following randomization parameters for {view_name} {attribute} on_reset are provided: " + \
"operation, distribution, distribution_parameters.")
self.active_domain_randomizations[("articulation_views", view_name, attribute, "on_reset")] = np.array(params["on_reset"]["distribution_parameters"])
kwargs = {"view_name": view_name, "operation": params["on_reset"]["operation"]}
if attribute == "material_properties" and "num_buckets" in params["on_reset"].keys():
kwargs["num_buckets"] = params["on_reset"]["num_buckets"]
self.distributions["articulation_views"][view_name][attribute]["on_reset"] = self._generate_distribution(
dimension=dr.physics_view._articulation_views_initial_values[view_name][attribute].shape[1],
view_name=view_name,
attribute=attribute,
params=params["on_reset"],
)
kwargs[attribute] = self.distributions["articulation_views"][view_name][attribute]["on_reset"]
with dr.gate.on_env_reset():
dr.physics_view.randomize_articulation_view(**kwargs)
if "on_interval" in params.keys():
if not set(('frequency_interval', 'operation','distribution', 'distribution_parameters')).issubset(params["on_interval"]):
raise ValueError(f"Please ensure the following randomization parameters for {view_name} {attribute} on_interval are provided: " + \
"frequency_interval, operation, distribution, distribution_parameters.")
self.active_domain_randomizations[("articulation_views", view_name, attribute, "on_interval")] = np.array(params["on_interval"]["distribution_parameters"])
kwargs = {"view_name": view_name, "operation": params["on_interval"]["operation"]}
if attribute == "material_properties" and "num_buckets" in params["on_interval"].keys():
kwargs["num_buckets"] = params["on_interval"]["num_buckets"]
self.distributions["articulation_views"][view_name][attribute]["on_interval"] = self._generate_distribution(
dimension=dr.physics_view._articulation_views_initial_values[view_name][attribute].shape[1],
view_name=view_name,
attribute=attribute,
params=params["on_interval"],
)
kwargs[attribute] = self.distributions["articulation_views"][view_name][attribute]["on_interval"]
with dr.gate.on_interval(interval=params["on_interval"]["frequency_interval"]):
dr.physics_view.randomize_articulation_view(**kwargs)
else:
raise ValueError(f"The attribute {attribute} for {view_name} is invalid for domain randomization.")
def _generate_distribution(self, view_name, attribute, dimension, params):
dist_params = self._sanitize_distribution_parameters(attribute, dimension, params["distribution_parameters"])
if params["distribution"] == "uniform":
return rep.distribution.uniform(tuple(dist_params[0]), tuple(dist_params[1]))
elif params["distribution"] == "gaussian" or params["distribution"] == "normal":
return rep.distribution.normal(tuple(dist_params[0]), tuple(dist_params[1]))
elif params["distribution"] == "loguniform" or params["distribution"] == "log_uniform":
return rep.distribution.log_uniform(tuple(dist_params[0]), tuple(dist_params[1]))
else:
raise ValueError(f"The provided distribution for {view_name} {attribute} is not supported. "
+ "Options: uniform, gaussian/normal, loguniform/log_uniform"
)
def _sanitize_distribution_parameters(self, attribute, dimension, params):
distribution_parameters = np.array(params)
if distribution_parameters.shape == (2,):
# if the user does not provide a set of parameters for each dimension
dist_params = [[distribution_parameters[0]]*dimension, [distribution_parameters[1]]*dimension]
elif distribution_parameters.shape == (2, dimension):
# if the user provides a set of parameters for each dimension in the format [[...], [...]]
dist_params = distribution_parameters.tolist()
elif attribute in ["material_properties", "body_inertias"] and distribution_parameters.shape == (2, 3):
# if the user only provides the parameters for one body in the articulation, assume the same parameters for all other links
            dist_params = [[*distribution_parameters[0]] * (dimension // 3), [*distribution_parameters[1]] * (dimension // 3)]
else:
raise ValueError(f"The provided distribution_parameters for {view_name} {attribute} is invalid due to incorrect dimensions.")
return dist_params
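    # Illustration of the broadcasting above for a 3-dimensional attribute
    # (hypothetical numbers, shown only to clarify the two common shapes):
    #   params = [0.5, 1.0]                         -> [[0.5, 0.5, 0.5], [1.0, 1.0, 1.0]]
    #   params = [[0.5, 0.4, 0.0], [1.0, 0.9, 0.1]] -> returned as-is, one entry per dimension
    # The first row always holds the lower/mean parameters and the second row the
    # upper/std parameters, which is how _generate_distribution consumes them.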
def set_dr_distribution_parameters(self, distribution_parameters, *distribution_path):
if distribution_path not in self.active_domain_randomizations.keys():
raise ValueError(f"Cannot find a valid domain randomization distribution using the path {distribution_path}.")
if distribution_path[0] == "observations":
if len(distribution_parameters) == 2:
self._observations_dr_params[distribution_path[1]]["distribution_parameters"] = distribution_parameters
else:
raise ValueError(f"Please provide distribution_parameters for observations {distribution_path[1]} " +
"in the form of [dist_param_1, dist_param_2]")
elif distribution_path[0] == "actions":
if len(distribution_parameters) == 2:
self._actions_dr_params[distribution_path[1]]["distribution_parameters"] = distribution_parameters
else:
raise ValueError(f"Please provide distribution_parameters for actions {distribution_path[1]} " +
"in the form of [dist_param_1, dist_param_2]")
else:
replicator_distribution = self.distributions[distribution_path[0]][distribution_path[1]][distribution_path[2]]
if distribution_path[0] == "rigid_prim_views" or distribution_path[0] == "articulation_views":
replicator_distribution = replicator_distribution[distribution_path[3]]
if replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleUniform" \
or replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleLogUniform":
dimension = len(dr.utils.get_distribution_params(replicator_distribution, ["lower"])[0])
dist_params = self._sanitize_distribution_parameters(distribution_path[-2], dimension, distribution_parameters)
dr.utils.set_distribution_params(replicator_distribution, {"lower": dist_params[0], "upper": dist_params[1]})
elif replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleNormal":
dimension = len(dr.utils.get_distribution_params(replicator_distribution, ["mean"])[0])
dist_params = self._sanitize_distribution_parameters(distribution_path[-2], dimension, distribution_parameters)
dr.utils.set_distribution_params(replicator_distribution, {"mean": dist_params[0], "std": dist_params[1]})
def get_dr_distribution_parameters(self, *distribution_path):
if distribution_path not in self.active_domain_randomizations.keys():
raise ValueError(f"Cannot find a valid domain randomization distribution using the path {distribution_path}.")
if distribution_path[0] == "observations":
return self._observations_dr_params[distribution_path[1]]["distribution_parameters"]
elif distribution_path[0] == "actions":
return self._actions_dr_params[distribution_path[1]]["distribution_parameters"]
else:
replicator_distribution = self.distributions[distribution_path[0]][distribution_path[1]][distribution_path[2]]
if distribution_path[0] == "rigid_prim_views" or distribution_path[0] == "articulation_views":
replicator_distribution = replicator_distribution[distribution_path[3]]
if replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleUniform" \
or replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleLogUniform":
return dr.utils.get_distribution_params(replicator_distribution, ["lower", "upper"])
elif replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleNormal":
return dr.utils.get_distribution_params(replicator_distribution, ["mean", "std"])
def get_initial_dr_distribution_parameters(self, *distribution_path):
if distribution_path not in self.active_domain_randomizations.keys():
raise ValueError(f"Cannot find a valid domain randomization distribution using the path {distribution_path}.")
return self.active_domain_randomizations[distribution_path].copy()
def _generate_noise(self, distribution, distribution_parameters, size, device):
if distribution == "gaussian" or distribution == "normal":
noise = torch.normal(mean=distribution_parameters[0], std=distribution_parameters[1], size=size, device=device)
elif distribution == "uniform":
noise = (distribution_parameters[1] - distribution_parameters[0]) * torch.rand(size, device=device) + distribution_parameters[0]
elif distribution == "loguniform" or distribution == "log_uniform":
noise = torch.exp((np.log(distribution_parameters[1]) - np.log(distribution_parameters[0])) * torch.rand(size, device=device) + np.log(distribution_parameters[0]))
else:
print(f"The specified {distribution} distribution is not supported.")
return noise
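    # The log-uniform branch above samples u ~ U(0, 1) and returns
    # exp(log(lo) + u * (log(hi) - log(lo))), i.e. values whose logarithm is
    # uniformly distributed on [log(lo), log(hi)]. With hypothetical parameters
    # [0.1, 10.0], for instance, roughly half of the samples fall below 1.0.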
def randomize_scale_on_startup(self, view, distribution, distribution_parameters, operation, sync_dim_noise=True):
scales = view.get_local_scales()
if sync_dim_noise:
dist_params = np.asarray(self._sanitize_distribution_parameters(attribute="scale", dimension=1, params=distribution_parameters))
noise = self._generate_noise(distribution, dist_params.squeeze(), (view.count,), view._device).repeat(3,1).T
else:
dist_params = np.asarray(self._sanitize_distribution_parameters(attribute="scale", dimension=3, params=distribution_parameters))
noise = torch.zeros((view.count, 3), device=view._device)
for i in range(3):
noise[:, i] = self._generate_noise(distribution, dist_params[:, i], (view.count,), view._device)
if operation == "additive":
scales += noise
elif operation == "scaling":
scales *= noise
elif operation == "direct":
scales = noise
else:
print(f"The specified {operation} operation type is not supported.")
view.set_local_scales(scales=scales)
def randomize_mass_on_startup(self, view, distribution, distribution_parameters, operation):
if isinstance(view, omni.isaac.core.prims.RigidPrimView) or isinstance(view, RigidPrimView):
masses = view.get_masses()
dist_params = np.asarray(self._sanitize_distribution_parameters(attribute=f"{view.name} mass", dimension=1, params=distribution_parameters))
noise = self._generate_noise(distribution, dist_params.squeeze(), (view.count,), view._device)
set_masses = view.set_masses
if operation == "additive":
masses += noise
elif operation == "scaling":
masses *= noise
elif operation == "direct":
masses = noise
else:
print(f"The specified {operation} operation type is not supported.")
set_masses(masses)
def randomize_density_on_startup(self, view, distribution, distribution_parameters, operation):
if isinstance(view, omni.isaac.core.prims.RigidPrimView) or isinstance(view, RigidPrimView):
densities = view.get_densities()
dist_params = np.asarray(self._sanitize_distribution_parameters(attribute=f"{view.name} density", dimension=1, params=distribution_parameters))
noise = self._generate_noise(distribution, dist_params.squeeze(), (view.count,), view._device)
set_densities = view.set_densities
if operation == "additive":
densities += noise
elif operation == "scaling":
densities *= noise
elif operation == "direct":
densities = noise
else:
print(f"The specified {operation} operation type is not supported.")
set_densities(densities)
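    # A minimal usage sketch for the on_startup helpers above, assuming
    # "object_view" is a RigidPrimView already registered in the scene (the
    # names and numbers are illustrative, not part of this file):
    #
    #   randomizer.randomize_scale_on_startup(
    #       view=object_view,
    #       distribution="uniform",
    #       distribution_parameters=[0.9, 1.1],
    #       operation="scaling",
    #       sync_dim_noise=True,   # one factor shared by x, y and z
    #   )
    #   randomizer.randomize_mass_on_startup(
    #       view=object_view,
    #       distribution="uniform",
    #       distribution_parameters=[0.8, 1.2],
    #       operation="scaling",
    #   )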
| 41,564 | Python | 70.787565 | 256 | 0.602877 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/utils/rlgames/rlgames_utils.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from rl_games.common import env_configurations, vecenv
from rl_games.common.algo_observer import AlgoObserver
from rl_games.algos_torch import torch_ext
import torch
import numpy as np
from typing import Callable
class RLGPUAlgoObserver(AlgoObserver):
"""Allows us to log stats from the env along with the algorithm running stats. """
def __init__(self):
pass
def after_init(self, algo):
self.algo = algo
self.mean_scores = torch_ext.AverageMeter(1, self.algo.games_to_track).to(self.algo.device)
self.ep_infos = []
self.direct_info = {}
self.writer = self.algo.writer
def process_infos(self, infos, done_indices):
assert isinstance(infos, dict), "RLGPUAlgoObserver expects dict info"
if isinstance(infos, dict):
if 'episode' in infos:
self.ep_infos.append(infos['episode'])
if len(infos) > 0 and isinstance(infos, dict): # allow direct logging from env
self.direct_info = {}
for k, v in infos.items():
# only log scalars
if isinstance(v, float) or isinstance(v, int) or (isinstance(v, torch.Tensor) and len(v.shape) == 0):
self.direct_info[k] = v
def after_clear_stats(self):
self.mean_scores.clear()
def after_print_stats(self, frame, epoch_num, total_time):
if self.ep_infos:
for key in self.ep_infos[0]:
infotensor = torch.tensor([], device=self.algo.device)
for ep_info in self.ep_infos:
# handle scalar and zero dimensional tensor infos
if not isinstance(ep_info[key], torch.Tensor):
ep_info[key] = torch.Tensor([ep_info[key]])
if len(ep_info[key].shape) == 0:
ep_info[key] = ep_info[key].unsqueeze(0)
infotensor = torch.cat((infotensor, ep_info[key].to(self.algo.device)))
value = torch.mean(infotensor)
self.writer.add_scalar('Episode/' + key, value, epoch_num)
self.ep_infos.clear()
for k, v in self.direct_info.items():
self.writer.add_scalar(f'{k}/frame', v, frame)
self.writer.add_scalar(f'{k}/iter', v, epoch_num)
self.writer.add_scalar(f'{k}/time', v, total_time)
if self.mean_scores.current_size > 0:
mean_scores = self.mean_scores.get_mean()
self.writer.add_scalar('scores/mean', mean_scores, frame)
self.writer.add_scalar('scores/iter', mean_scores, epoch_num)
self.writer.add_scalar('scores/time', mean_scores, total_time)
class RLGPUEnv(vecenv.IVecEnv):
def __init__(self, config_name, num_actors, **kwargs):
self.env = env_configurations.configurations[config_name]['env_creator'](**kwargs)
def step(self, action):
return self.env.step(action)
def reset(self):
return self.env.reset()
def get_number_of_agents(self):
return self.env.get_number_of_agents()
def get_env_info(self):
info = {}
info['action_space'] = self.env.action_space
info['observation_space'] = self.env.observation_space
if self.env.num_states > 0:
info['state_space'] = self.env.state_space
print(info['action_space'], info['observation_space'], info['state_space'])
else:
print(info['action_space'], info['observation_space'])
return info
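# Typical registration sketch so rl_games can construct RLGPUEnv (assuming the
# VecEnvBase instance `env` is created elsewhere, e.g. in the training script):
#
#   vecenv.register(
#       "RLGPU",
#       lambda config_name, num_actors, **kwargs: RLGPUEnv(config_name, num_actors, **kwargs),
#   )
#   env_configurations.register(
#       "rlgpu", {"vecenv_type": "RLGPU", "env_creator": lambda **kwargs: env}
#   )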
| 5,149 | Python | 42.277311 | 121 | 0.642649 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/utils/dash/live_plot.py | import os
import socket
import logging
import threading
import numpy as np
import time
import torch
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import plotly.io as pio
from dash import Dash, html, dcc, Output, Input
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
pio.renderers.default = "browser"
callback_dict = dict()
def callback(*args, **kwargs):
def wrapped(func):
global callback_dict
callback_dict[func.__name__] = (args, kwargs)
return func
return wrapped
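# The decorator above only records its Dash Output/Input arguments in
# callback_dict, keyed by function name; LivePlot._build_app later looks each
# recorded name up on the instance and binds it with
# app.callback(*args, **kwargs)(func). The @callback(...) on _update_graph
# below is what wires the live graph to the interval timer.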
class LivePlot:
def __init__(self, name, titles, steps):
self.name = name
self.titles = titles
self.dim_names = list(titles.keys())
self.dim_labels = list(titles.values())
self.num_dims = len(self.dim_names)
for i, labels in enumerate(self.dim_labels):
if isinstance(labels, list):
self.dim_labels[i] = ['All'] + labels
if isinstance(labels, int):
self.dim_labels[i] = ['All'] + list(map(str, range(labels)))
self.steps = 0
self.size = steps
self.time_axis = np.arange(steps)
self.free_dim = -1
self.datas = np.full([steps] + [len(x) - 1 for x in self.dim_labels], np.nan)
self._build_app()
self._create_thread()
def _build_app(self):
dropdowns = []
for name, labels in zip(self.dim_names, self.dim_labels):
dropdowns.append(name)
options = {str(i): label for i, label in enumerate(labels)}
dropdowns.append(dcc.Dropdown(id=name, options=options, value='0'))
app = Dash(__name__)
app.layout = html.Div([
html.H1(children=self.name, style={'textAlign': 'center'}),
html.Div(dropdowns),
html.Div([
dcc.Graph(id='live-graph'),
dcc.Interval(
id='interval-component',
interval=16,
n_intervals=0
)
])
])
for func_name, (args, kwargs) in callback_dict.items():
func = getattr(self, func_name)
app.callback(*args, **kwargs)(func)
app.callback(
[Output(i, 'value') for i in self.dim_names],
[Input(i, 'value') for i in self.dim_names]
)(self._update_figure)
self._update_figure(*(['1'] * self.num_dims))
self._app = app
def _create_thread(self):
port = 8050
while True:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
if s.connect_ex(('localhost', port)):
break
else:
port += 1
run_server = lambda: self._app.run(host='0.0.0.0', port=port)
thread = threading.Thread(target=run_server)
thread.daemon = True
thread.start()
time.sleep(0.1)
print('live plot:', self.name, f'http://localhost:{port}')
self._thread = thread
def _update_figure(self, *values, save_path=None):
values = [str(v) for v in values]
idx = [slice(None)]
titles = [' ']
# print('free dim', self.free_dim)
free_dim = -1
for i, v in enumerate(values):
if v == '0':
if free_dim == -1:
free_dim = i
else:
values[i] = '1'
if free_dim != self.free_dim and self.free_dim != -1:
values[self.free_dim] = '1'
self.free_dim = free_dim
for i in range(self.num_dims):
if values[i] == '0':
titles = self.dim_labels[i][1:]
idx.append(slice(None))
else:
idx.append(int(values[i]) - 1)
self.idx = tuple(idx)
# print(self.idx)
# print(titles)
self._updating = True
self.fig = go.FigureWidget(make_subplots(rows=len(titles), cols=1, subplot_titles=titles))
for i, data in enumerate(self._get_plot_data()):
self.fig.add_trace(go.Scatter(name='', x=self.time_axis, y=data), row=i+1, col=1)
self.fig.update_layout(height=200*len(titles)+100, template='plotly')
self._updating = False
if save_path is not None:
self.fig.write_html(save_path)
# print(values)
return values
def _get_plot_data(self):
datas = self.datas[self.idx]
return np.expand_dims(datas, 0) if datas.ndim == 1 else np.swapaxes(datas, 0, 1)
def update(self, datas):
if isinstance(datas, torch.Tensor):
datas = datas.detach().cpu().numpy()
if self.steps >= self.size:
self.time_axis += 1
self.datas[:-1] = self.datas[1:]
self.datas[-1] = datas
else:
self.datas[self.steps] = datas
self.steps += 1
while self._updating:
time.sleep(0.01)
for i, data in enumerate(self._get_plot_data()):
self.fig.data[i]['x'] = self.time_axis
self.fig.data[i]['y'] = data
@callback(
Output('live-graph', 'figure'),
Input('interval-component', 'n_intervals')
)
def _update_graph(self, n):
return self.fig
def select_labels(self, *labels):
# ToDo update selector label
self._update_figure(*labels)
def snapshot(self, dir_path, free_dim=0):
def export(labels, names):
dim = len(labels)
if dim == self.num_dims:
name = self.name + ': ' + ' '.join(names) if names else self.name
save_path = os.path.join(dir_path, name) + '.html'
self._update_figure(*labels, save_path=save_path)
else:
if dim == free_dim:
export(labels + [0], names)
else:
for i, s in enumerate(self.dim_labels[dim][1:]):
export(labels + [i+1], names + [s])
export([], [])
def save(self, dir_path):
state = self.__dict__.copy()
state.pop('_app')
state.pop('_thread')
torch.save(state, os.path.join(dir_path, self.name + '.liveplot'))
@staticmethod
def load(path):
plot = LivePlot.__new__(LivePlot)
plot.__dict__ = torch.load(path)
plot._build_app()
plot._create_thread()
return plot
if __name__ == '__main__':
plot = LivePlot('1', {'1': ['a', 'b'], '2': 5}, 30)
plot2 = LivePlot('2', {'1': ['a', 'b'], '2': 5}, 30)
import time
for i in range(10):
plot.update(np.random.random([2, 5]))
plot2.update(np.random.random([2, 5]))
time.sleep(0.1)
| 6,737 | Python | 29.488688 | 98 | 0.523081 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/utils/config_utils/sim_config.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from utils.config_utils.default_scene_params import *
import copy
import omni.usd
import numpy as np
import torch
import carb
class SimConfig():
def __init__(self, config: dict = None):
self._config = config
self._cfg = config.get("task", dict())
self._parse_config()
if self._config["test"] == True:
self._sim_params["enable_scene_query_support"] = True
from omni.isaac.core.utils.extensions import enable_extension
if self._config["headless"] == True and not self._sim_params["enable_cameras"] and not self._config["enable_livestream"]:
self._sim_params["use_flatcache"] = False
self._sim_params["enable_viewport"] = False
else:
self._sim_params["enable_viewport"] = True
enable_extension("omni.kit.viewport.bundle")
enable_extension("omni.replicator.isaac")
if self._sim_params["disable_contact_processing"]:
carb.settings.get_settings().set_bool("/physics/disableContactProcessing", True)
carb.settings.get_settings().set_bool("/physics/physxDispatcher", True)
def _parse_config(self):
        # general sim parameters
self._sim_params = copy.deepcopy(default_sim_params)
self._default_physics_material = copy.deepcopy(default_physics_material)
sim_cfg = self._cfg.get("sim", None)
if sim_cfg is not None:
for opt in sim_cfg.keys():
if opt in self._sim_params:
if opt == "default_physics_material":
for material_opt in sim_cfg[opt]:
self._default_physics_material[material_opt] = sim_cfg[opt][material_opt]
else:
self._sim_params[opt] = sim_cfg[opt]
else:
print("Sim params does not have attribute: ", opt)
self._sim_params["default_physics_material"] = self._default_physics_material
# physx parameters
self._physx_params = copy.deepcopy(default_physx_params)
if sim_cfg is not None and "physx" in sim_cfg:
for opt in sim_cfg["physx"].keys():
if opt in self._physx_params:
self._physx_params[opt] = sim_cfg["physx"][opt]
else:
print("Physx sim params does not have attribute: ", opt)
self._sanitize_device()
def _sanitize_device(self):
if self._sim_params["use_gpu_pipeline"]:
self._physx_params["use_gpu"] = True
# device should be in sync with pipeline
if self._sim_params["use_gpu_pipeline"]:
self._config["sim_device"] = f"cuda:{self._config['device_id']}"
else:
self._config["sim_device"] = "cpu"
# also write to physics params for setting sim device
self._physx_params["sim_device"] = self._config["sim_device"]
print("Pipeline: ", "GPU" if self._sim_params["use_gpu_pipeline"] else "CPU")
print("Pipeline Device: ", self._config["sim_device"])
print("Sim Device: ", "GPU" if self._physx_params["use_gpu"] else "CPU")
def parse_actor_config(self, actor_name):
actor_params = copy.deepcopy(default_actor_options)
if "sim" in self._cfg and actor_name in self._cfg["sim"]:
actor_cfg = self._cfg["sim"][actor_name]
for opt in actor_cfg.keys():
if actor_cfg[opt] != -1 and opt in actor_params:
actor_params[opt] = actor_cfg[opt]
elif opt not in actor_params:
print("Actor params does not have attribute: ", opt)
return actor_params
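    # Example of the per-actor overrides parsed above (hypothetical task yaml,
    # actor name and attribute values are illustrative):
    #
    #   sim:
    #     cartpole:                          # actor_name passed to parse_actor_config
    #       override_usd_defaults: False
    #       enable_self_collisions: False
    #       solver_position_iteration_count: 4
    #       solver_velocity_iteration_count: 1
    #       density: -1                      # -1 keeps the default/authored value
    #
    # Options left at -1 fall back to default_actor_options and, via
    # _get_actor_config_value, to the authored USD value or default_physx_params.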
def _get_actor_config_value(self, actor_name, attribute_name, attribute=None):
actor_params = self.parse_actor_config(actor_name)
if attribute is not None:
if attribute_name not in actor_params:
return attribute.Get()
if actor_params[attribute_name] != -1:
return actor_params[attribute_name]
elif actor_params["override_usd_defaults"] and not attribute.IsAuthored():
return self._physx_params[attribute_name]
else:
if actor_params[attribute_name] != -1:
return actor_params[attribute_name]
@property
def sim_params(self):
return self._sim_params
@property
def config(self):
return self._config
@property
def task_config(self):
return self._cfg
@property
def physx_params(self):
return self._physx_params
def get_physics_params(self):
return {**self.sim_params, **self.physx_params}
def _get_physx_collision_api(self, prim):
from pxr import UsdPhysics, PhysxSchema
physx_collision_api = PhysxSchema.PhysxCollisionAPI(prim)
if not physx_collision_api:
physx_collision_api = PhysxSchema.PhysxCollisionAPI.Apply(prim)
return physx_collision_api
def _get_physx_rigid_body_api(self, prim):
from pxr import UsdPhysics, PhysxSchema
physx_rb_api = PhysxSchema.PhysxRigidBodyAPI(prim)
if not physx_rb_api:
physx_rb_api = PhysxSchema.PhysxRigidBodyAPI.Apply(prim)
return physx_rb_api
def _get_physx_articulation_api(self, prim):
from pxr import UsdPhysics, PhysxSchema
arti_api = PhysxSchema.PhysxArticulationAPI(prim)
if not arti_api:
arti_api = PhysxSchema.PhysxArticulationAPI.Apply(prim)
return arti_api
def set_contact_offset(self, name, prim, value=None):
physx_collision_api = self._get_physx_collision_api(prim)
contact_offset = physx_collision_api.GetContactOffsetAttr()
# if not contact_offset:
# contact_offset = physx_collision_api.CreateContactOffsetAttr()
if value is None:
value = self._get_actor_config_value(name, "contact_offset", contact_offset)
if value != -1:
contact_offset.Set(value)
def set_rest_offset(self, name, prim, value=None):
physx_collision_api = self._get_physx_collision_api(prim)
rest_offset = physx_collision_api.GetRestOffsetAttr()
# if not rest_offset:
# rest_offset = physx_collision_api.CreateRestOffsetAttr()
if value is None:
value = self._get_actor_config_value(name, "rest_offset", rest_offset)
if value != -1:
rest_offset.Set(value)
def set_position_iteration(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
solver_position_iteration_count = physx_rb_api.GetSolverPositionIterationCountAttr()
if value is None:
value = self._get_actor_config_value(name, "solver_position_iteration_count", solver_position_iteration_count)
if value != -1:
solver_position_iteration_count.Set(value)
def set_velocity_iteration(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
solver_velocity_iteration_count = physx_rb_api.GetSolverVelocityIterationCountAttr()
if value is None:
value = self._get_actor_config_value(name, "solver_velocity_iteration_count", solver_position_iteration_count)
if value != -1:
solver_velocity_iteration_count.Set(value)
def set_max_depenetration_velocity(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
max_depenetration_velocity = physx_rb_api.GetMaxDepenetrationVelocityAttr()
if value is None:
value = self._get_actor_config_value(name, "max_depenetration_velocity", max_depenetration_velocity)
if value != -1:
max_depenetration_velocity.Set(value)
def set_sleep_threshold(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
sleep_threshold = physx_rb_api.GetSleepThresholdAttr()
if value is None:
value = self._get_actor_config_value(name, "sleep_threshold", sleep_threshold)
if value != -1:
sleep_threshold.Set(value)
def set_stabilization_threshold(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
stabilization_threshold = physx_rb_api.GetStabilizationThresholdAttr()
if value is None:
value = self._get_actor_config_value(name, "stabilization_threshold", stabilization_threshold)
if value != -1:
stabilization_threshold.Set(value)
def set_gyroscopic_forces(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
enable_gyroscopic_forces = physx_rb_api.GetEnableGyroscopicForcesAttr()
if value is None:
value = self._get_actor_config_value(name, "enable_gyroscopic_forces", enable_gyroscopic_forces)
if value != -1:
enable_gyroscopic_forces.Set(value)
def set_density(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
density = physx_rb_api.GetDensityAttr()
if value is None:
value = self._get_actor_config_value(name, "density", density)
if value != -1:
density.Set(value)
# auto-compute mass
            self.set_mass(name, prim, 0.0)
def set_mass(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
mass = physx_rb_api.GetMassAttr()
if value is None:
value = self._get_actor_config_value(name, "mass", mass)
if value != -1:
mass.Set(value)
def retain_acceleration(self, prim):
# retain accelerations if running with more than one substep
physx_rb_api = self._get_physx_rigid_body_api(prim)
if self._sim_params["substeps"] > 1:
physx_rb_api.GetRetainAccelerationsAttr().Set(True)
def make_kinematic(self, name, prim, cfg, value=None):
# make rigid body kinematic (fixed base and no collision)
from pxr import UsdPhysics, PhysxSchema
stage = omni.usd.get_context().get_stage()
if value is None:
value = self._get_actor_config_value(name, "make_kinematic")
if value:
# parse through all children prims
prims = [prim]
while len(prims) > 0:
cur_prim = prims.pop(0)
rb = UsdPhysics.RigidBodyAPI.Get(stage, cur_prim.GetPath())
if rb:
rb.CreateKinematicEnabledAttr().Set(True)
children_prims = cur_prim.GetPrim().GetChildren()
prims = prims + children_prims
def set_articulation_position_iteration(self, name, prim, value=None):
arti_api = self._get_physx_articulation_api(prim)
solver_position_iteration_count = arti_api.GetSolverPositionIterationCountAttr()
if value is None:
value = self._get_actor_config_value(name, "solver_position_iteration_count", solver_position_iteration_count)
if value != -1:
solver_position_iteration_count.Set(value)
def set_articulation_velocity_iteration(self, name, prim, value=None):
arti_api = self._get_physx_articulation_api(prim)
solver_velocity_iteration_count = arti_api.GetSolverVelocityIterationCountAttr()
if value is None:
value = self._get_actor_config_value(name, "solver_velocity_iteration_count", solver_position_iteration_count)
if value != -1:
solver_velocity_iteration_count.Set(value)
def set_articulation_sleep_threshold(self, name, prim, value=None):
arti_api = self._get_physx_articulation_api(prim)
sleep_threshold = arti_api.GetSleepThresholdAttr()
if value is None:
value = self._get_actor_config_value(name, "sleep_threshold", sleep_threshold)
if value != -1:
sleep_threshold.Set(value)
def set_articulation_stabilization_threshold(self, name, prim, value=None):
arti_api = self._get_physx_articulation_api(prim)
stabilization_threshold = arti_api.GetStabilizationThresholdAttr()
if value is None:
value = self._get_actor_config_value(name, "stabilization_threshold", stabilization_threshold)
if value != -1:
stabilization_threshold.Set(value)
def apply_rigid_body_settings(self, name, prim, cfg, is_articulation):
from pxr import UsdPhysics, PhysxSchema
stage = omni.usd.get_context().get_stage()
rb_api = UsdPhysics.RigidBodyAPI.Get(stage, prim.GetPath())
physx_rb_api = PhysxSchema.PhysxRigidBodyAPI.Get(stage, prim.GetPath())
if not physx_rb_api:
physx_rb_api = PhysxSchema.PhysxRigidBodyAPI.Apply(prim)
# if it's a body in an articulation, it's handled at articulation root
if not is_articulation:
self.make_kinematic(name, prim, cfg, cfg["make_kinematic"])
self.set_position_iteration(name, prim, cfg["solver_position_iteration_count"])
self.set_velocity_iteration(name, prim, cfg["solver_velocity_iteration_count"])
self.set_max_depenetration_velocity(name, prim, cfg["max_depenetration_velocity"])
self.set_sleep_threshold(name, prim, cfg["sleep_threshold"])
self.set_stabilization_threshold(name, prim, cfg["stabilization_threshold"])
self.set_gyroscopic_forces(name, prim, cfg["enable_gyroscopic_forces"])
# density and mass
mass_api = UsdPhysics.MassAPI.Get(stage, prim.GetPath())
if mass_api is None:
mass_api = UsdPhysics.MassAPI.Apply(prim)
mass_attr = mass_api.GetMassAttr()
density_attr = mass_api.GetDensityAttr()
if not mass_attr:
mass_attr = mass_api.CreateMassAttr()
if not density_attr:
density_attr = mass_api.CreateDensityAttr()
if cfg["density"] != -1:
density_attr.Set(cfg["density"])
mass_attr.Set(0.0) # mass is to be computed
elif cfg["override_usd_defaults"] and not density_attr.IsAuthored() and not mass_attr.IsAuthored():
density_attr.Set(self._physx_params["density"])
self.retain_acceleration(prim)
def apply_rigid_shape_settings(self, name, prim, cfg):
from pxr import UsdPhysics, PhysxSchema
stage = omni.usd.get_context().get_stage()
# collision APIs
collision_api = UsdPhysics.CollisionAPI(prim)
if not collision_api:
collision_api = UsdPhysics.CollisionAPI.Apply(prim)
physx_collision_api = PhysxSchema.PhysxCollisionAPI(prim)
if not physx_collision_api:
physx_collision_api = PhysxSchema.PhysxCollisionAPI.Apply(prim)
self.set_contact_offset(name, prim, cfg["contact_offset"])
self.set_rest_offset(name, prim, cfg["rest_offset"])
def apply_articulation_settings(self, name, prim, cfg):
from pxr import UsdPhysics, PhysxSchema
stage = omni.usd.get_context().get_stage()
is_articulation = False
# check if is articulation
prims = [prim]
while len(prims) > 0:
prim_tmp = prims.pop(0)
articulation_api = UsdPhysics.ArticulationRootAPI.Get(stage, prim_tmp.GetPath())
physx_articulation_api = PhysxSchema.PhysxArticulationAPI.Get(stage, prim_tmp.GetPath())
if articulation_api or physx_articulation_api:
is_articulation = True
children_prims = prim_tmp.GetPrim().GetChildren()
prims = prims + children_prims
# parse through all children prims
prims = [prim]
while len(prims) > 0:
cur_prim = prims.pop(0)
rb = UsdPhysics.RigidBodyAPI.Get(stage, cur_prim.GetPath())
collision_body = UsdPhysics.CollisionAPI.Get(stage, cur_prim.GetPath())
articulation = UsdPhysics.ArticulationRootAPI.Get(stage, cur_prim.GetPath())
if rb:
self.apply_rigid_body_settings(name, cur_prim, cfg, is_articulation)
if collision_body:
self.apply_rigid_shape_settings(name, cur_prim, cfg)
if articulation:
articulation_api = UsdPhysics.ArticulationRootAPI.Get(stage, cur_prim.GetPath())
physx_articulation_api = PhysxSchema.PhysxArticulationAPI.Get(stage, cur_prim.GetPath())
# enable self collisions
enable_self_collisions = physx_articulation_api.GetEnabledSelfCollisionsAttr()
if cfg["enable_self_collisions"] != -1:
enable_self_collisions.Set(cfg["enable_self_collisions"])
self.set_articulation_position_iteration(name, cur_prim, cfg["solver_position_iteration_count"])
self.set_articulation_velocity_iteration(name, cur_prim, cfg["solver_velocity_iteration_count"])
self.set_articulation_sleep_threshold(name, cur_prim, cfg["sleep_threshold"])
self.set_articulation_stabilization_threshold(name, cur_prim, cfg["stabilization_threshold"])
children_prims = cur_prim.GetPrim().GetChildren()
prims = prims + children_prims
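    # Typical call-site sketch from a task's scene setup (names are illustrative,
    # assuming `robot` is an articulation already added to the stage):
    #
    #   self._sim_config.apply_articulation_settings(
    #       "robot",
    #       get_prim_at_path(robot.prim_path),
    #       self._sim_config.parse_actor_config("robot"),
    #   )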
| 18,833 | Python | 44.383132 | 129 | 0.638188 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/utils/config_utils/default_scene_params.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
default_physx_params = {
### Per-scene settings
"use_gpu": False,
"worker_thread_count": 4,
"solver_type": 1, # 0: PGS, 1:TGS
"bounce_threshold_velocity": 0.2,
"friction_offset_threshold": 0.04, # A threshold of contact separation distance used to decide if a contact
# point will experience friction forces.
"friction_correlation_distance": 0.025, # Contact points can be merged into a single friction anchor if the
# distance between the contacts is smaller than correlation distance.
# disabling these can be useful for debugging
"enable_sleeping": True,
"enable_stabilization": True,
# GPU buffers
"gpu_max_rigid_contact_count": 512 * 1024,
"gpu_max_rigid_patch_count": 80 * 1024,
"gpu_found_lost_pairs_capacity": 1024,
"gpu_found_lost_aggregate_pairs_capacity": 1024,
"gpu_total_aggregate_pairs_capacity": 1024,
"gpu_max_soft_body_contacts": 1024 * 1024,
"gpu_max_particle_contacts": 1024 * 1024,
"gpu_heap_capacity": 64 * 1024 * 1024,
"gpu_temp_buffer_capacity": 16 * 1024 * 1024,
"gpu_max_num_partitions": 8,
### Per-actor settings ( can override in actor_options )
"solver_position_iteration_count": 4,
"solver_velocity_iteration_count": 1,
"sleep_threshold": 0.0, # Mass-normalized kinetic energy threshold below which an actor may go to sleep.
# Allowed range [0, max_float).
"stabilization_threshold": 0.0, # Mass-normalized kinetic energy threshold below which an actor may
# participate in stabilization. Allowed range [0, max_float).
### Per-body settings ( can override in actor_options )
"enable_gyroscopic_forces": False,
"density": 1000.0, # density to be used for bodies that do not specify mass or density
"max_depenetration_velocity": 100.0,
### Per-shape settings ( can override in actor_options )
"contact_offset": 0.02,
"rest_offset": 0.001
}
default_physics_material = {
"static_friction": 1.0,
"dynamic_friction": 1.0,
"restitution": 0.0
}
default_sim_params = {
"gravity": [0.0, 0.0, -9.81],
"dt": 1.0 / 60.0,
"substeps": 1,
"use_gpu_pipeline": True,
"add_ground_plane": True,
"add_distant_light": True,
"use_flatcache": True,
"enable_scene_query_support": False,
"enable_cameras": False,
"disable_contact_processing": False,
"default_physics_material": default_physics_material
}
default_actor_options = {
# -1 means use authored value from USD or default values from default_sim_params if not explicitly authored in USD.
# If an attribute value is not explicitly authored in USD, add one with the value given here,
# which overrides the USD default.
"override_usd_defaults": False,
"make_kinematic": -1,
"enable_self_collisions": -1,
"enable_gyroscopic_forces": -1,
"solver_position_iteration_count": -1,
"solver_velocity_iteration_count": -1,
"sleep_threshold": -1,
"stabilization_threshold": -1,
"max_depenetration_velocity": -1,
"density": -1,
"mass": -1,
"contact_offset": -1,
"rest_offset": -1
}
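# Sketch of how these defaults are typically consumed (assuming a parsed Hydra
# config dict `cfg` with "task", "test", "headless", ... entries; see
# config_utils.sim_config.SimConfig):
#
#   sim_config = SimConfig(cfg)
#   physics_params = sim_config.get_physics_params()          # default_sim_params merged with default_physx_params
#   actor_options = sim_config.parse_actor_config("cartpole") # default_actor_options plus yaml overrides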
| 4,803 | Python | 41.140351 | 119 | 0.684364 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/utils/config_utils/path_utils.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import carb
from hydra.utils import to_absolute_path
import os
def is_valid_local_file(path):
return os.path.isfile(path)
def is_valid_ov_file(path):
import omni.client
result, entry = omni.client.stat(path)
return result == omni.client.Result.OK
def download_ov_file(source_path, target_path):
import omni.client
result = omni.client.copy(source_path, target_path)
if result == omni.client.Result.OK:
return True
return False
def break_ov_path(path):
import omni.client
return omni.client.break_url(path)
def retrieve_checkpoint_path(path):
# check if it's a local path
if is_valid_local_file(path):
return to_absolute_path(path)
# check if it's an OV path
elif is_valid_ov_file(path):
ov_path = break_ov_path(path)
file_name = os.path.basename(ov_path.path)
target_path = f"checkpoints/{file_name}"
copy_to_local = download_ov_file(path, target_path)
return to_absolute_path(target_path)
else:
carb.log_error(f"Invalid checkpoint path: {path}")
return None | 2,656 | Python | 38.656716 | 80 | 0.735693 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/utils/hydra_cfg/hydra_utils.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import hydra
from omegaconf import DictConfig, OmegaConf
## OmegaConf & Hydra Config
# Resolvers used in hydra configs (see https://omegaconf.readthedocs.io/en/2.1_branch/usage.html#resolvers)
OmegaConf.register_new_resolver('eq', lambda x, y: x.lower()==y.lower())
OmegaConf.register_new_resolver('contains', lambda x, y: x.lower() in y.lower())
OmegaConf.register_new_resolver('if', lambda pred, a, b: a if pred else b)
# allows us to resolve default arguments which are copied in multiple places in the config. used primarily for
# num_envs
OmegaConf.register_new_resolver('resolve_default', lambda default, arg: default if arg=='' else arg)
| 2,207 | Python | 51.571427 | 110 | 0.775714 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/utils/hydra_cfg/reformat.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from omegaconf import DictConfig, ListConfig, OmegaConf
from typing import Dict
def omegaconf_to_dict(d: DictConfig)->Dict:
"""Converts an omegaconf DictConfig to a python Dict, respecting variable interpolation."""
ret = {}
for k, v in d.items():
if isinstance(v, DictConfig):
ret[k] = omegaconf_to_dict(v)
elif isinstance(v, ListConfig):
ret[k] = list(v)
else:
ret[k] = v
return ret
def print_dict(val, nesting: int = -4, start: bool = True):
"""Outputs a nested dictionory."""
if type(val) == dict:
if not start:
print('')
nesting += 4
for k in val:
print(nesting * ' ', end='')
print(k, end=': ')
print_dict(val[k], nesting, start=False)
else:
print(val) | 2,390 | Python | 40.224137 | 95 | 0.703347 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/utils/terrain_utils/terrain_utils.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from numpy.random import choice
from scipy import interpolate
from math import sqrt
from omni.isaac.core.prims import XFormPrim
from pxr import UsdPhysics, Sdf, Gf, PhysxSchema
def random_uniform_terrain(terrain, min_height, max_height, step=1, downsampled_scale=None,):
"""
Generate a uniform noise terrain
    Parameters:
terrain (SubTerrain): the terrain
min_height (float): the minimum height of the terrain [meters]
max_height (float): the maximum height of the terrain [meters]
step (float): minimum height change between two points [meters]
        downsampled_scale (float): distance between two randomly sampled points (must be larger than or equal to terrain.horizontal_scale)
"""
if downsampled_scale is None:
downsampled_scale = terrain.horizontal_scale
# switch parameters to discrete units
min_height = int(min_height / terrain.vertical_scale)
max_height = int(max_height / terrain.vertical_scale)
step = int(step / terrain.vertical_scale)
heights_range = np.arange(min_height, max_height + step, step)
height_field_downsampled = np.random.choice(heights_range, (int(terrain.width * terrain.horizontal_scale / downsampled_scale), int(
terrain.length * terrain.horizontal_scale / downsampled_scale)))
x = np.linspace(0, terrain.width * terrain.horizontal_scale, height_field_downsampled.shape[0])
y = np.linspace(0, terrain.length * terrain.horizontal_scale, height_field_downsampled.shape[1])
f = interpolate.interp2d(y, x, height_field_downsampled, kind='linear')
x_upsampled = np.linspace(0, terrain.width * terrain.horizontal_scale, terrain.width)
y_upsampled = np.linspace(0, terrain.length * terrain.horizontal_scale, terrain.length)
z_upsampled = np.rint(f(y_upsampled, x_upsampled))
terrain.height_field_raw += z_upsampled.astype(np.int16)
return terrain
def sloped_terrain(terrain, slope=1):
"""
Generate a sloped terrain
Parameters:
terrain (SubTerrain): the terrain
slope (int): positive or negative slope
Returns:
        terrain (SubTerrain): updated terrain
"""
x = np.arange(0, terrain.width)
y = np.arange(0, terrain.length)
xx, yy = np.meshgrid(x, y, sparse=True)
xx = xx.reshape(terrain.width, 1)
max_height = int(slope * (terrain.horizontal_scale / terrain.vertical_scale) * terrain.width)
terrain.height_field_raw[:, np.arange(terrain.length)] += (max_height * xx / terrain.width).astype(terrain.height_field_raw.dtype)
return terrain
def pyramid_sloped_terrain(terrain, slope=1, platform_size=1.):
"""
    Generate a pyramid-shaped sloped terrain with a flat platform at the center
Parameters:
terrain (terrain): the terrain
slope (int): positive or negative slope
platform_size (float): size of the flat platform at the center of the terrain [meters]
Returns:
        terrain (SubTerrain): updated terrain
"""
x = np.arange(0, terrain.width)
y = np.arange(0, terrain.length)
center_x = int(terrain.width / 2)
center_y = int(terrain.length / 2)
xx, yy = np.meshgrid(x, y, sparse=True)
xx = (center_x - np.abs(center_x-xx)) / center_x
yy = (center_y - np.abs(center_y-yy)) / center_y
xx = xx.reshape(terrain.width, 1)
yy = yy.reshape(1, terrain.length)
max_height = int(slope * (terrain.horizontal_scale / terrain.vertical_scale) * (terrain.width / 2))
terrain.height_field_raw += (max_height * xx * yy).astype(terrain.height_field_raw.dtype)
platform_size = int(platform_size / terrain.horizontal_scale / 2)
x1 = terrain.width // 2 - platform_size
x2 = terrain.width // 2 + platform_size
y1 = terrain.length // 2 - platform_size
y2 = terrain.length // 2 + platform_size
min_h = min(terrain.height_field_raw[x1, y1], 0)
max_h = max(terrain.height_field_raw[x1, y1], 0)
terrain.height_field_raw = np.clip(terrain.height_field_raw, min_h, max_h)
return terrain
def discrete_obstacles_terrain(terrain, max_height, min_size, max_size, num_rects, platform_size=1.):
"""
    Generate a terrain with randomly placed, randomly sized rectangular obstacles
Parameters:
terrain (terrain): the terrain
max_height (float): maximum height of the obstacles (range=[-max, -max/2, max/2, max]) [meters]
min_size (float): minimum size of a rectangle obstacle [meters]
max_size (float): maximum size of a rectangle obstacle [meters]
num_rects (int): number of randomly generated obstacles
platform_size (float): size of the flat platform at the center of the terrain [meters]
Returns:
        terrain (SubTerrain): updated terrain
"""
# switch parameters to discrete units
max_height = int(max_height / terrain.vertical_scale)
min_size = int(min_size / terrain.horizontal_scale)
max_size = int(max_size / terrain.horizontal_scale)
platform_size = int(platform_size / terrain.horizontal_scale)
(i, j) = terrain.height_field_raw.shape
height_range = [-max_height, -max_height // 2, max_height // 2, max_height]
width_range = range(min_size, max_size, 4)
length_range = range(min_size, max_size, 4)
for _ in range(num_rects):
width = np.random.choice(width_range)
length = np.random.choice(length_range)
start_i = np.random.choice(range(0, i-width, 4))
start_j = np.random.choice(range(0, j-length, 4))
terrain.height_field_raw[start_i:start_i+width, start_j:start_j+length] = np.random.choice(height_range)
x1 = (terrain.width - platform_size) // 2
x2 = (terrain.width + platform_size) // 2
y1 = (terrain.length - platform_size) // 2
y2 = (terrain.length + platform_size) // 2
terrain.height_field_raw[x1:x2, y1:y2] = 0
return terrain
def wave_terrain(terrain, num_waves=1, amplitude=1.):
"""
Generate a wavy terrain
Parameters:
terrain (terrain): the terrain
num_waves (int): number of sine waves across the terrain length
Returns:
        terrain (SubTerrain): updated terrain
"""
amplitude = int(0.5*amplitude / terrain.vertical_scale)
if num_waves > 0:
div = terrain.length / (num_waves * np.pi * 2)
x = np.arange(0, terrain.width)
y = np.arange(0, terrain.length)
xx, yy = np.meshgrid(x, y, sparse=True)
xx = xx.reshape(terrain.width, 1)
yy = yy.reshape(1, terrain.length)
terrain.height_field_raw += (amplitude*np.cos(yy / div) + amplitude*np.sin(xx / div)).astype(
terrain.height_field_raw.dtype)
return terrain
def stairs_terrain(terrain, step_width, step_height):
"""
    Generate stairs
Parameters:
terrain (terrain): the terrain
step_width (float): the width of the step [meters]
step_height (float): the height of the step [meters]
Returns:
        terrain (SubTerrain): updated terrain
"""
# switch parameters to discrete units
step_width = int(step_width / terrain.horizontal_scale)
step_height = int(step_height / terrain.vertical_scale)
num_steps = terrain.width // step_width
height = step_height
for i in range(num_steps):
terrain.height_field_raw[i * step_width: (i + 1) * step_width, :] += height
height += step_height
return terrain
def pyramid_stairs_terrain(terrain, step_width, step_height, platform_size=1.):
"""
Generate stairs
Parameters:
terrain (terrain): the terrain
step_width (float): the width of the step [meters]
step_height (float): the step_height [meters]
platform_size (float): size of the flat platform at the center of the terrain [meters]
Returns:
        terrain (SubTerrain): updated terrain
"""
# switch parameters to discrete units
step_width = int(step_width / terrain.horizontal_scale)
step_height = int(step_height / terrain.vertical_scale)
platform_size = int(platform_size / terrain.horizontal_scale)
height = 0
start_x = 0
stop_x = terrain.width
start_y = 0
stop_y = terrain.length
while (stop_x - start_x) > platform_size and (stop_y - start_y) > platform_size:
start_x += step_width
stop_x -= step_width
start_y += step_width
stop_y -= step_width
height += step_height
terrain.height_field_raw[start_x: stop_x, start_y: stop_y] = height
return terrain
def stepping_stones_terrain(terrain, stone_size, stone_distance, max_height, platform_size=1., depth=-10):
"""
Generate a stepping stones terrain
Parameters:
terrain (terrain): the terrain
stone_size (float): horizontal size of the stepping stones [meters]
stone_distance (float): distance between stones (i.e size of the holes) [meters]
max_height (float): maximum height of the stones (positive and negative) [meters]
platform_size (float): size of the flat platform at the center of the terrain [meters]
depth (float): depth of the holes (default=-10.) [meters]
Returns:
        terrain (SubTerrain): updated terrain
"""
# switch parameters to discrete units
stone_size = int(stone_size / terrain.horizontal_scale)
stone_distance = int(stone_distance / terrain.horizontal_scale)
max_height = int(max_height / terrain.vertical_scale)
platform_size = int(platform_size / terrain.horizontal_scale)
height_range = np.arange(-max_height-1, max_height, step=1)
start_x = 0
start_y = 0
terrain.height_field_raw[:, :] = int(depth / terrain.vertical_scale)
if terrain.length >= terrain.width:
while start_y < terrain.length:
stop_y = min(terrain.length, start_y + stone_size)
start_x = np.random.randint(0, stone_size)
# fill first hole
stop_x = max(0, start_x - stone_distance)
terrain.height_field_raw[0: stop_x, start_y: stop_y] = np.random.choice(height_range)
# fill row
while start_x < terrain.width:
stop_x = min(terrain.width, start_x + stone_size)
terrain.height_field_raw[start_x: stop_x, start_y: stop_y] = np.random.choice(height_range)
start_x += stone_size + stone_distance
start_y += stone_size + stone_distance
elif terrain.width > terrain.length:
while start_x < terrain.width:
stop_x = min(terrain.width, start_x + stone_size)
start_y = np.random.randint(0, stone_size)
# fill first hole
stop_y = max(0, start_y - stone_distance)
terrain.height_field_raw[start_x: stop_x, 0: stop_y] = np.random.choice(height_range)
# fill column
while start_y < terrain.length:
stop_y = min(terrain.length, start_y + stone_size)
terrain.height_field_raw[start_x: stop_x, start_y: stop_y] = np.random.choice(height_range)
start_y += stone_size + stone_distance
start_x += stone_size + stone_distance
x1 = (terrain.width - platform_size) // 2
x2 = (terrain.width + platform_size) // 2
y1 = (terrain.length - platform_size) // 2
y2 = (terrain.length + platform_size) // 2
terrain.height_field_raw[x1:x2, y1:y2] = 0
return terrain
def convert_heightfield_to_trimesh(height_field_raw, horizontal_scale, vertical_scale, slope_threshold=None):
"""
Convert a heightfield array to a triangle mesh represented by vertices and triangles.
    Optionally, corrects vertical surfaces above the provided slope threshold:
If (y2-y1)/(x2-x1) > slope_threshold -> Move A to A' (set x1 = x2). Do this for all directions.
B(x2,y2)
/|
/ |
/ |
(x1,y1)A---A'(x2',y1)
Parameters:
height_field_raw (np.array): input heightfield
horizontal_scale (float): horizontal scale of the heightfield [meters]
vertical_scale (float): vertical scale of the heightfield [meters]
slope_threshold (float): the slope threshold above which surfaces are made vertical. If None no correction is applied (default: None)
Returns:
vertices (np.array(float)): array of shape (num_vertices, 3). Each row represents the location of each vertex [meters]
triangles (np.array(int)): array of shape (num_triangles, 3). Each row represents the indices of the 3 vertices connected by this triangle.
"""
hf = height_field_raw
num_rows = hf.shape[0]
num_cols = hf.shape[1]
y = np.linspace(0, (num_cols-1)*horizontal_scale, num_cols)
x = np.linspace(0, (num_rows-1)*horizontal_scale, num_rows)
yy, xx = np.meshgrid(y, x)
if slope_threshold is not None:
slope_threshold *= horizontal_scale / vertical_scale
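        # mark grid points whose height jump to a neighbour exceeds the threshold, then shift those
        # points horizontally so overly steep faces become vertical walls instead of sloped surfaces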
move_x = np.zeros((num_rows, num_cols))
move_y = np.zeros((num_rows, num_cols))
move_corners = np.zeros((num_rows, num_cols))
move_x[:num_rows-1, :] += (hf[1:num_rows, :] - hf[:num_rows-1, :] > slope_threshold)
move_x[1:num_rows, :] -= (hf[:num_rows-1, :] - hf[1:num_rows, :] > slope_threshold)
move_y[:, :num_cols-1] += (hf[:, 1:num_cols] - hf[:, :num_cols-1] > slope_threshold)
move_y[:, 1:num_cols] -= (hf[:, :num_cols-1] - hf[:, 1:num_cols] > slope_threshold)
move_corners[:num_rows-1, :num_cols-1] += (hf[1:num_rows, 1:num_cols] - hf[:num_rows-1, :num_cols-1] > slope_threshold)
move_corners[1:num_rows, 1:num_cols] -= (hf[:num_rows-1, :num_cols-1] - hf[1:num_rows, 1:num_cols] > slope_threshold)
xx += (move_x + move_corners*(move_x == 0)) * horizontal_scale
yy += (move_y + move_corners*(move_y == 0)) * horizontal_scale
# create triangle mesh vertices and triangles from the heightfield grid
vertices = np.zeros((num_rows*num_cols, 3), dtype=np.float32)
vertices[:, 0] = xx.flatten()
vertices[:, 1] = yy.flatten()
vertices[:, 2] = hf.flatten() * vertical_scale
triangles = -np.ones((2*(num_rows-1)*(num_cols-1), 3), dtype=np.uint32)
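    # each heightfield cell contributes two triangles: (ind0, ind3, ind1) and (ind0, ind2, ind3)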
for i in range(num_rows - 1):
ind0 = np.arange(0, num_cols-1) + i*num_cols
ind1 = ind0 + 1
ind2 = ind0 + num_cols
ind3 = ind2 + 1
start = 2*i*(num_cols-1)
stop = start + 2*(num_cols-1)
triangles[start:stop:2, 0] = ind0
triangles[start:stop:2, 1] = ind3
triangles[start:stop:2, 2] = ind1
triangles[start+1:stop:2, 0] = ind0
triangles[start+1:stop:2, 1] = ind2
triangles[start+1:stop:2, 2] = ind3
return vertices, triangles
def add_terrain_to_stage(stage, vertices, triangles, position=None, orientation=None):
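    # build a Mesh prim from the vertices/triangles, wrap it in an XFormPrim for placement,
    # and apply collision APIs so the terrain behaves as a static collider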
num_faces = triangles.shape[0]
terrain_mesh = stage.DefinePrim("/World/terrain", "Mesh")
terrain_mesh.GetAttribute("points").Set(vertices)
terrain_mesh.GetAttribute("faceVertexIndices").Set(triangles.flatten())
terrain_mesh.GetAttribute("faceVertexCounts").Set(np.asarray([3]*num_faces))
terrain = XFormPrim(prim_path="/World/terrain",
name="terrain",
position=position,
orientation=orientation)
UsdPhysics.CollisionAPI.Apply(terrain.prim)
# collision_api = UsdPhysics.MeshCollisionAPI.Apply(terrain.prim)
# collision_api.CreateApproximationAttr().Set("meshSimplification")
physx_collision_api = PhysxSchema.PhysxCollisionAPI.Apply(terrain.prim)
physx_collision_api.GetContactOffsetAttr().Set(0.02)
physx_collision_api.GetRestOffsetAttr().Set(0.00)
class SubTerrain:
def __init__(self, terrain_name="terrain", width=256, length=256, vertical_scale=1.0, horizontal_scale=1.0):
self.terrain_name = terrain_name
self.vertical_scale = vertical_scale
self.horizontal_scale = horizontal_scale
self.width = width
self.length = length
self.height_field_raw = np.zeros((self.width, self.length), dtype=np.int16)
| 17,478 | Python | 42.917085 | 147 | 0.655166 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/utils/terrain_utils/create_terrain_demo.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os, sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(SCRIPT_DIR)
import omni
from omni.isaac.kit import SimulationApp
import numpy as np
import torch
simulation_app = SimulationApp({"headless": False})
from abc import abstractmethod
from omni.isaac.core.tasks import BaseTask
from omni.isaac.core.prims import RigidPrimView, RigidPrim, XFormPrim
from omni.isaac.core import World
from omni.isaac.core.objects import DynamicSphere
from omni.isaac.core.utils.prims import define_prim, get_prim_at_path
from omni.isaac.core.utils.nucleus import find_nucleus_server
from omni.isaac.core.utils.stage import add_reference_to_stage, get_current_stage
from omni.isaac.core.materials import PreviewSurface
from omni.isaac.cloner import GridCloner
from pxr import UsdPhysics, UsdLux, UsdShade, Sdf, Gf, UsdGeom, PhysxSchema
from terrain_utils import *
class TerrainCreation(BaseTask):
def __init__(self, name, num_envs, num_per_row, env_spacing, config=None, offset=None,) -> None:
BaseTask.__init__(self, name=name, offset=offset)
self._num_envs = num_envs
self._num_per_row = num_per_row
self._env_spacing = env_spacing
self._device = "cpu"
self._cloner = GridCloner(self._env_spacing, self._num_per_row)
self._cloner.define_base_env(self.default_base_env_path)
define_prim(self.default_zero_env_path)
@property
def default_base_env_path(self):
return "/World/envs"
@property
def default_zero_env_path(self):
return f"{self.default_base_env_path}/env_0"
def set_up_scene(self, scene) -> None:
self._stage = get_current_stage()
distantLight = UsdLux.DistantLight.Define(self._stage, Sdf.Path("/World/DistantLight"))
distantLight.CreateIntensityAttr(2000)
self.get_terrain()
self.get_ball()
super().set_up_scene(scene)
prim_paths = self._cloner.generate_paths("/World/envs/env", self._num_envs)
print(f"cloning {self._num_envs} environments...")
self._env_pos = self._cloner.clone(
source_prim_path="/World/envs/env_0",
prim_paths=prim_paths
)
return
def get_terrain(self):
# create all available terrain types
        num_terrains = 8
terrain_width = 12.
terrain_length = 12.
horizontal_scale = 0.25 # [m]
vertical_scale = 0.005 # [m]
num_rows = int(terrain_width/horizontal_scale)
num_cols = int(terrain_length/horizontal_scale)
        heightfield = np.zeros((num_terrains*num_rows, num_cols), dtype=np.int16)
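        # one strip per terrain type, stacked along the x axis of a single large heightfield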
def new_sub_terrain():
return SubTerrain(width=num_rows, length=num_cols, vertical_scale=vertical_scale, horizontal_scale=horizontal_scale)
heightfield[0:num_rows, :] = random_uniform_terrain(new_sub_terrain(), min_height=-0.2, max_height=0.2, step=0.2, downsampled_scale=0.5).height_field_raw
heightfield[num_rows:2*num_rows, :] = sloped_terrain(new_sub_terrain(), slope=-0.5).height_field_raw
heightfield[2*num_rows:3*num_rows, :] = pyramid_sloped_terrain(new_sub_terrain(), slope=-0.5).height_field_raw
heightfield[3*num_rows:4*num_rows, :] = discrete_obstacles_terrain(new_sub_terrain(), max_height=0.5, min_size=1., max_size=5., num_rects=20).height_field_raw
heightfield[4*num_rows:5*num_rows, :] = wave_terrain(new_sub_terrain(), num_waves=2., amplitude=1.).height_field_raw
heightfield[5*num_rows:6*num_rows, :] = stairs_terrain(new_sub_terrain(), step_width=0.75, step_height=-0.5).height_field_raw
heightfield[6*num_rows:7*num_rows, :] = pyramid_stairs_terrain(new_sub_terrain(), step_width=0.75, step_height=-0.5).height_field_raw
heightfield[7*num_rows:8*num_rows, :] = stepping_stones_terrain(new_sub_terrain(), stone_size=1.,
stone_distance=1., max_height=0.5, platform_size=0.).height_field_raw
vertices, triangles = convert_heightfield_to_trimesh(heightfield, horizontal_scale=horizontal_scale, vertical_scale=vertical_scale, slope_threshold=1.5)
position = np.array([-6.0, 48.0, 0])
orientation = np.array([0.70711, 0.0, 0.0, -0.70711])
add_terrain_to_stage(stage=self._stage, vertices=vertices, triangles=triangles, position=position, orientation=orientation)
def get_ball(self):
ball = DynamicSphere(prim_path=self.default_zero_env_path + "/ball",
name="ball",
translation=np.array([0.0, 0.0, 1.0]),
mass=0.5,
radius=0.2,)
def post_reset(self):
for i in range(self._num_envs):
ball_prim = self._stage.GetPrimAtPath(f"{self.default_base_env_path}/env_{i}/ball")
color = 0.5 + 0.5 * np.random.random(3)
visual_material = PreviewSurface(prim_path=f"{self.default_base_env_path}/env_{i}/ball/Looks/visual_material", color=color)
binding_api = UsdShade.MaterialBindingAPI(ball_prim)
binding_api.Bind(visual_material.material, bindingStrength=UsdShade.Tokens.strongerThanDescendants)
def get_observations(self):
pass
def calculate_metrics(self) -> None:
pass
def is_done(self) -> None:
pass
if __name__ == "__main__":
world = World(
stage_units_in_meters=1.0,
rendering_dt=1.0/60.0,
backend="torch",
device="cpu",
)
num_envs = 800
num_per_row = 80
env_spacing = 0.56*2
terrain_creation_task = TerrainCreation(name="TerrainCreation",
num_envs=num_envs,
num_per_row=num_per_row,
env_spacing=env_spacing,
)
world.add_task(terrain_creation_task)
world.reset()
while simulation_app.is_running():
if world.is_playing():
if world.current_time_step_index == 0:
world.reset(soft=True)
world.step(render=True)
else:
world.step(render=True)
simulation_app.close() | 7,869 | Python | 43.213483 | 166 | 0.650654 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/utils/usd_utils/create_instanceable_assets.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import omni.usd
import omni.client
from pxr import UsdGeom, Sdf
def update_reference(source_prim_path, source_reference_path, target_reference_path):
stage = omni.usd.get_context().get_stage()
prims = [stage.GetPrimAtPath(source_prim_path)]
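    # breadth-first traversal of the prim subtree; any reference matching source_reference_path is replaced with target_reference_path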
while len(prims) > 0:
prim = prims.pop(0)
prim_spec = stage.GetRootLayer().GetPrimAtPath(prim.GetPath())
reference_list = prim_spec.referenceList
refs = reference_list.GetAddedOrExplicitItems()
if len(refs) > 0:
for ref in refs:
if ref.assetPath == source_reference_path:
prim.GetReferences().RemoveReference(ref)
prim.GetReferences().AddReference(assetPath=target_reference_path, primPath=prim.GetPath())
prims = prims + prim.GetChildren()
def create_parent_xforms(asset_usd_path, source_prim_path, save_as_path=None):
""" Adds a new UsdGeom.Xform prim for each Mesh/Geometry prim under source_prim_path.
Moves material assignment to new parent prim if any exists on the Mesh/Geometry prim.
Args:
asset_usd_path (str): USD file path for asset
source_prim_path (str): USD path of root prim
save_as_path (str): USD file path for modified USD stage. Defaults to None, will save in same file.
"""
omni.usd.get_context().open_stage(asset_usd_path)
stage = omni.usd.get_context().get_stage()
prims = [stage.GetPrimAtPath(source_prim_path)]
edits = Sdf.BatchNamespaceEdit()
while len(prims) > 0:
prim = prims.pop(0)
print(prim)
if prim.GetTypeName() in ["Mesh", "Capsule", "Sphere", "Box"]:
new_xform = UsdGeom.Xform.Define(stage, str(prim.GetPath()) + "_xform")
print(prim, new_xform)
edits.Add(Sdf.NamespaceEdit.Reparent(prim.GetPath(), new_xform.GetPath(), 0))
continue
children_prims = prim.GetChildren()
prims = prims + children_prims
stage.GetRootLayer().Apply(edits)
if save_as_path is None:
omni.usd.get_context().save_stage()
else:
omni.usd.get_context().save_as_stage(save_as_path)
def convert_asset_instanceable(asset_usd_path, source_prim_path, save_as_path=None, create_xforms=True):
""" Makes all mesh/geometry prims instanceable.
Can optionally add UsdGeom.Xform prim as parent for all mesh/geometry prims.
Makes a copy of the asset USD file, which will be used for referencing.
Updates asset file to convert all parent prims of mesh/geometry prims to reference cloned USD file.
Args:
asset_usd_path (str): USD file path for asset
source_prim_path (str): USD path of root prim
save_as_path (str): USD file path for modified USD stage. Defaults to None, will save in same file.
create_xforms (bool): Whether to add new UsdGeom.Xform prims to mesh/geometry prims.
"""
if create_xforms:
create_parent_xforms(asset_usd_path, source_prim_path, save_as_path)
asset_usd_path = save_as_path
instance_usd_path = ".".join(asset_usd_path.split(".")[:-1]) + "_meshes.usd"
omni.client.copy(asset_usd_path, instance_usd_path)
omni.usd.get_context().open_stage(asset_usd_path)
stage = omni.usd.get_context().get_stage()
prims = [stage.GetPrimAtPath(source_prim_path)]
while len(prims) > 0:
prim = prims.pop(0)
if prim:
if prim.GetTypeName() in ["Mesh", "Capsule", "Sphere", "Box"]:
parent_prim = prim.GetParent()
if parent_prim and not parent_prim.IsInstance():
parent_prim.GetReferences().AddReference(assetPath=instance_usd_path, primPath=str(parent_prim.GetPath()))
parent_prim.SetInstanceable(True)
continue
children_prims = prim.GetChildren()
prims = prims + children_prims
if save_as_path is None:
omni.usd.get_context().save_stage()
else:
omni.usd.get_context().save_as_stage(save_as_path)
| 5,639 | Python | 43.761904 | 126 | 0.675829 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/runs/Noob/20231116-200419/config.yaml | task:
name: Noob
env:
num_envs: ${resolve_default:1,${...num_envs}}
train_data: ./train.csv
test_data: ${resolve_default:'./test.csv',${...test_data}}
window_size: 10
frame_bound:
- 1850
- 2850
train:
name: PPOAgent
params:
seed: ${...seed}
model:
actor_mlp:
- 256
- 256
critic_mlp:
- 256
- 256
load_checkpoint: ${if:${...checkpoint},True,False}
load_path: ${...checkpoint}
config:
device: ${....rl_device}
save_frequency: 10
normalize_obs: true
normalize_value: false
normalize_advantage: true
horizon_length: 2048
max_epochs: ${resolve_default:200,${....max_iterations}}
mini_epochs: 6
minibatch_size: 512
tau: 0.75
gamma: 0.75
e_clip: 0.2
clip_value: false
learning_rate: 0.001
critic_loss_coef: 1
bounds_loss_coef: 10
grad_penalty_coef: 0
experiment: ''
num_envs: ''
seed: 42
torch_deterministic: false
rl_device: cpu
max_iterations: ''
test: false
checkpoint: ''
headless: false
enable_livestream: false
mt_timeout: 30
render: false
debug: false
wandb: true
save: true
profile: false
test_data: ''
| 1,197 | YAML | 19.305084 | 62 | 0.593985 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/runs/Noob/20231116-200419/codes/actor_critic_model.py | from copy import deepcopy
import torch
from torch import nn
from torch.distributions import Categorical
from .utils import neg_log_p, eval_no_grad, Identity, RunningMeanStd
class Mlp(nn.Module):
def __init__(
self,
in_size, hidden_size, out_size=None,
activation: nn.Module = nn.ReLU(),
output_activation: nn.Module = nn.Identity()
):
super().__init__()
model = []
self.sizes = sizes = [in_size] + hidden_size
for x, y in zip(sizes[:-1], sizes[1:]):
model.append(nn.Linear(x, y))
model.append(deepcopy(activation))
if out_size is not None:
model.append(nn.Linear(sizes[-1], out_size))
self.model = nn.Sequential(*model)
self.out_act = output_activation
def forward(self, x):
return self.out_act(self.model(x))
def set_spectral_norm(self):
for i, layer in enumerate(self.model):
if isinstance(layer, nn.Linear):
self.model[i] = nn.utils.spectral_norm(layer)
class ActorCriticModel(nn.Module):
def __init__(self, config):
super().__init__()
self.obs_size = config['num_obs']
self.action_size = config['num_actions']
self.value_size = config['num_values']
self.actor = self.Actor(self.obs_size, config['actor_mlp'], self.action_size)
self.critic = self.Critic(self.obs_size, config['critic_mlp'], self.value_size)
normalize = lambda x: (x - x.mean()) / (x.std() + 1e-8)
self.normalize_obs = RunningMeanStd(self.obs_size) if config['normalize_obs'] else Identity()
self.normalize_value = RunningMeanStd(self.value_size) if config['normalize_value'] else Identity()
self.normalize_advantage = normalize if config['normalize_advantage'] else Identity()
self.preproc_advantage = lambda x: self.normalize_advantage(x.mean(dim=-1))
class Actor(nn.Module):
def __init__(self, obs_size, mlp_size, action_size):
super().__init__()
            self.mu = Mlp(obs_size, mlp_size, 9, output_activation=nn.Softmax(dim=-1))  # 9 discrete actions
def forward(self, x):
return self.mu(x)
class Critic(nn.Module):
def __init__(self, obs_size, mlp_size, value_size):
super().__init__()
self.value = Mlp(obs_size, mlp_size, value_size)
def forward(self, x):
return self.value(x)
@eval_no_grad
def get_action(self, obs, train=False, test=False):
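        # train: return the raw action probabilities; test: act greedily via argmax;
        # otherwise sample from the Categorical policy and also return the sampled action's negative log-probability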
obs = self.normalize_obs(obs)
mu = self.actor(obs)
if train:
return mu
elif test:
return torch.argmax(mu, dim=-1)
else:
action_dist = Categorical(mu)
action = action_dist.sample()
return action, -action_dist.log_prob(action)
@eval_no_grad
def get_value(self, obs, train=False):
obs = self.normalize_obs(obs)
value = self.critic(obs)
if train:
return value
else:
return self.normalize_value(value, unnorm=True)
| 3,072 | Python | 33.144444 | 107 | 0.58724 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/runs/Noob/20231116-200419/codes/replay_buffer.py | import torch
class ReplayBuffer():
def __init__(self, buffer_size, device):
self._head = 0
self._total_count = 0
self._buffer_size = buffer_size
self._device = device
self._data_buf = None
self._sample_idx = torch.randperm(buffer_size)
self._sample_head = 0
return
def reset(self):
self._head = 0
self._total_count = 0
self._reset_sample_idx()
return
def get_buffer_size(self):
return self._buffer_size
def get_total_count(self):
return self._total_count
def store(self, data_dict):
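        # ring-buffer write: data that runs past the end of the buffer wraps around to index 0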
if (self._data_buf is None):
self._init_data_buf(data_dict)
n = next(iter(data_dict.values())).shape[0]
buffer_size = self.get_buffer_size()
assert(n < buffer_size)
for key, curr_buf in self._data_buf.items():
curr_n = data_dict[key].shape[0]
assert(n == curr_n)
store_n = min(curr_n, buffer_size - self._head)
curr_buf[self._head:(self._head + store_n)] = data_dict[key][:store_n]
remainder = n - store_n
if (remainder > 0):
curr_buf[0:remainder] = data_dict[key][store_n:]
self._head = (self._head + n) % buffer_size
self._total_count += n
return
def sample(self, n):
total_count = self.get_total_count()
buffer_size = self.get_buffer_size()
if self.is_empty():
return None
idx = torch.arange(self._sample_head, self._sample_head + n)
idx = idx % buffer_size
rand_idx = self._sample_idx[idx]
if (total_count < buffer_size):
rand_idx = rand_idx % self._head
samples = dict()
for k, v in self._data_buf.items():
samples[k] = v[rand_idx]
self._sample_head += n
if (self._sample_head >= buffer_size):
self._reset_sample_idx()
return samples
def _reset_sample_idx(self):
buffer_size = self.get_buffer_size()
self._sample_idx[:] = torch.randperm(buffer_size)
self._sample_head = 0
return
def _init_data_buf(self, data_dict):
buffer_size = self.get_buffer_size()
self._data_buf = dict()
for k, v in data_dict.items():
v_shape = v.shape[1:]
self._data_buf[k] = torch.zeros((buffer_size,) + v_shape, device=self._device)
return
def is_empty(self):
return self._total_count == 0
class ReplayBufferCPU(ReplayBuffer):
def __init__(self, buffer_size, device):
self.sample_device = device
super().__init__(buffer_size, device='cpu')
def sample(self, n):
x = super().sample(n)
if x is not None:
for k in x.keys():
x[k] = x[k].to(self.sample_device)
return x
| 2,897 | Python | 26.339622 | 90 | 0.534001 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/runs/Noob/20231116-200419/codes/pg_agent.py | import torch
from .ppo_agent import PPOAgent
torch.autograd.set_detect_anomaly(True)
class PGAgent(PPOAgent):
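    # plain policy-gradient variant of PPOAgent: score-function (REINFORCE-style) actor loss, no critic training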
def _actor_loss(self, _, neglogp, reward):
return (neglogp * reward).sum()
def _critic_loss(self, old_value, value, return_batch):
return 0
| 278 | Python | 20.461537 | 59 | 0.679856 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/runs/Noob/20231116-200419/codes/experience.py | import gym
import torch
import numpy as np
class ExperienceBuffer:
def __init__(self, shape, env_info, device):
self.shape = tuple(shape)
self.num_obs = env_info['num_obs']
self.num_actions = env_info['num_actions']
self.num_values = env_info['num_values']
self.device = device
self.datas = {}
self.create_buffer()
def create_buffer(self):
self.add_buffer('obs', self.num_obs)
self.add_buffer('reward', self.num_values)
self.add_buffer('return', self.num_values)
self.add_buffer('value', self.num_values)
self.add_buffer('action', self.num_actions)
self.add_buffer('neglogp')
self.add_buffer('done', dtype=torch.long)
self.add_buffer('next_obs', self.num_obs)
self.add_buffer('next_value', self.num_values)
# def create_buffer(self):
# self.datas['obs'] = torch.zeros([*self.shape, self.num_obs], device=self.device)
# self.datas['reward'] = torch.zeros([*self.shape, self.num_values], device=self.device)
# self.datas['return'] = torch.zeros([*self.shape, self.num_values], device=self.device)
# self.datas['value'] = torch.zeros([*self.shape, self.num_values], device=self.device)
# self.datas['action'] = torch.zeros([*self.shape, self.num_actions], device=self.device)
# self.datas['neglogp'] = torch.zeros([*self.shape], device=self.device)
# self.datas['done'] = torch.zeros([*self.shape], dtype=torch.long, device=self.device)
# self.datas['next_obs'] = torch.zeros([*self.shape, self.num_obs], device=self.device)
# self.datas['next_value'] = torch.zeros([*self.shape, self.num_values], device=self.device)
def add_buffer(self, name, shape=(), dtype=torch.float):
shape = (shape,) if isinstance(shape, int) else tuple(shape)
self.datas[name] = torch.zeros(self.shape + shape, dtype=dtype, device=self.device)
def update_data(self, *args, **kwargs):
raise NotImplementedError
def get_data(self, *args, **kwargs):
raise NotImplementedError
class VecEnvExperienceBuffer(ExperienceBuffer):
def update_data(self, key, idx, value):
self.datas[key][idx] = value
def get_data(self):
batch_dict = {}
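        # flatten time and actor dimensions into a single batch dimension: [horizon, num_actors, ...] -> [horizon * num_actors, ...]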
for k, v in self.datas.items():
s = v.shape
batch_dict[k] = v.transpose(0, 1).reshape(s[0] * s[1], *s[2:])
return batch_dict
class AsyncExperienceBuffer(ExperienceBuffer):
def __init__(self, num_actors, env_info, max_size, device):
super().__init__([max_size * 2], env_info, device)
self.size = max_size
self.run_idx = torch.zeros([num_actors], dtype=torch.long, device=self.device)
def create_buffer(self):
super().create_buffer()
self.status = torch.zeros(self.shape, dtype=torch.long, device=self.device)
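        # slot status: 0 = free, -1 = rollout in progress, >0 = completed transition (larger = older)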
self.datas['steps'] = torch.zeros([*self.shape], dtype=torch.long, device=self.device)
def update_data(self, **kwargs):
raise NotImplementedError
def pre_update_data(self, env_ids, datas: dict):
idx = (self.status == 0).nonzero().squeeze(-1)[:len(env_ids)]
self.run_idx[env_ids] = idx
for k, v in datas.items():
self.datas[k][idx] = v
self.status[idx] = -1
def post_update_data(self, env_ids, datas: dict):
idx = self.run_idx[env_ids]
for k, v in datas.items():
self.datas[k][idx] = v
self.status[self.status > 0] += 1
self.status[idx] = 1
        # TODO: check whether this reset is needed
self.status[idx[datas['steps'] <= 0]] = 0
def full(self):
return torch.sum(self.status > 0) >= self.size
def get_data(self):
if not self.full():
raise
idx = self.status.topk(self.size, sorted=False)[1]
data = {k: v[idx] for k, v in self.datas.items()}
self.status[idx] = 0
return data
if __name__ == '__main__':
T = torch.Tensor
TL = lambda x: T(x).to(dtype=torch.long)
Z = torch.zeros
R = torch.rand
env_info = {'action_space': Z(2), 'observation_space': Z(3), 'value_size': 1}
buf = AsyncExperienceBuffer(5, env_info, 5, 'cpu')
buf.pre_update_data(TL([1, 3]), {'obs': T([[1, 1, 1], [2, 2, 2]])})
buf.pre_update_data(TL([2, 0]), {'obs': T([[3, 3, 3], [4, 4, 4]])})
buf.post_update_data(TL([2, 0]), {'action': T([[3, 3], [4, 4]])})
buf.post_update_data(TL([1, 3]), {'action': T([[1, 1], [2, 2]])})
buf.pre_update_data(TL([2, 0]), {'obs': T([[3, 3, 3], [4, 4, 4]])})
buf.post_update_data(TL([2, 0]), {'action': T([[3, 3], [4, 4]])})
print(buf.run_idx)
print(buf.datas['obs'], buf.datas['action'])
print(buf.status)
print(buf.get_data())
print(buf.status)
| 4,782 | Python | 38.204918 | 100 | 0.587411 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/runs/Noob/20231116-200419/codes/utils.py | import numpy as np
import torch
from torch import nn
from utils.torch_utils import to_torch_size
def eval_no_grad(func):
def _eval_no_grad(self, *args, **kwargs):
if not self.training:
with torch.no_grad():
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return _eval_no_grad
class Identity(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, x, **kwargs):
return x
def neg_log_p(x, mean, log_std):
return 0.5 * (((x - mean) / torch.exp(log_std)) ** 2).sum(dim=-1) \
+ 0.5 * np.log(2.0 * np.pi) * x.size()[-1] \
+ log_std.sum(dim=-1)
class RunningMeanStd(nn.Module):
def __init__(self, in_size, eps=1e-05):
super().__init__()
self.in_size = to_torch_size(in_size)
self.eps = eps
self.register_buffer("mean", torch.zeros(in_size, dtype=torch.float64))
self.register_buffer("var", torch.ones(in_size, dtype=torch.float64))
self.register_buffer("count", torch.ones((), dtype=torch.float64))
def _update(self, batch_mean, batch_var, batch_count):
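        # merge batch statistics into the running mean/variance using the parallel-variance update (Chan et al.)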
delta = batch_mean - self.mean
m_a = self.var * self.count
m_b = batch_var * batch_count
m2 = m_a + m_b + delta**2 * self.count * batch_count / (self.count + batch_count)
self.count += batch_count
self.mean[:] = self.mean + delta * batch_count / self.count
self.var[:] = m2 / self.count
def forward(self, x, unnorm=False):
if x.nelement() == 0:
return x
if self.training and not unnorm:
axis = list(range(x.ndim - len(self.in_size)))
mean = x.mean(axis)
var = x.var(axis, correction=0)
count = x.shape[:-1].numel()
self._update(mean, var, count)
if unnorm:
y = torch.clamp(x, min=-5.0, max=5.0)
y = torch.sqrt(self.var.float() + self.eps) * y + self.mean.float()
else:
y = (x - self.mean.float()) / torch.sqrt(self.var.float() + self.eps)
y = torch.clamp(y, min=-5.0, max=5.0)
return y
| 2,193 | Python | 29.472222 | 89 | 0.545372 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/runs/Noob/20231116-200419/codes/dataset.py | import torch
class Dataset:
def __init__(self, batch_size, minibatch_size, device):
self.batch_size = batch_size
self.minibatch_size = minibatch_size
self.device = device
# self.size = self.batch_size // self.minibatch_size
self._idx_buf = torch.randperm(batch_size)
def update(self, datas):
self.datas = datas
def __len__(self):
return self.batch_size // self.minibatch_size
def __getitem__(self, idx):
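        # slice one minibatch out of a pre-shuffled permutation of the full batch;
        # reshuffle once every sample has been visited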
start = idx * self.minibatch_size
end = (idx + 1) * self.minibatch_size
sample_idx = self._idx_buf[start:end]
data_dict = {}
for k, v in self.datas.items():
if v is not None:
data_dict[k] = v[sample_idx].detach()
if end >= self.batch_size:
self._shuffle_idx_buf()
return data_dict
def _shuffle_idx_buf(self):
self._idx_buf[:] = torch.randperm(self.batch_size)
| 969 | Python | 26.714285 | 60 | 0.55934 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/runs/Noob/20231116-200419/codes/ppo_agent.py | import os
import shutil
import time
import torch
from torch import optim
from torch.distributions import Categorical
from .utils import neg_log_p
from .dataset import Dataset
from .experience import VecEnvExperienceBuffer
from .actor_critic_model import ActorCriticModel
from utils.runner import Runner
torch.autograd.set_detect_anomaly(True)
class PPOAgent:
def __init__(self, params, env):
print(f'\n------------------------------------ {self.__class__.__name__} ------------------------------------')
self.config = config = params['config']
self.device = config.get('device', 'cuda:0')
# save
self.save_freq = config.get('save_frequency', 0)
# normalize
self.normalize_obs = self.config['normalize_obs']
self.normalize_value = self.config.get('normalize_value', False)
self.normalize_advantage = config['normalize_advantage']
# learning
self.lr = config['learning_rate']
self.num_actors = env.num_envs
self.horizon_length = config['horizon_length']
self.seq_len = self.config.get('seq_length', 4)
self.max_epochs = self.config.get('max_epochs', -1)
self.mini_epochs_num = self.config['mini_epochs']
self.minibatch_size = self.config.get('minibatch_size')
self.batch_size = self.horizon_length * self.num_actors
assert (self.batch_size % self.minibatch_size == 0)
self.e_clip = config['e_clip']
self.clip_action = self.config.get('clip_actions', True)
self.clip_value = config['clip_value']
self.tau = self.config['tau']
self.gamma = self.config['gamma']
self.critic_loss_coef = config['critic_loss_coef']
self.bounds_loss_coef = self.config.get('bounds_loss_coef', None)
# env
self.env = env
self.build_env_info()
# model
self.build_model(params['model'])
self.optimizer = optim.AdamW(self.model.parameters(), self.lr, eps=1e-08, weight_decay=0)
# buffers
self.dataset = Dataset(self.batch_size, self.minibatch_size, self.device)
self.experience_buffer = VecEnvExperienceBuffer([self.horizon_length, self.num_actors], self.env_info, self.device)
# counter
self.epoch_num = 0
self.env.agent = self
def build_env_info(self):
self.env_info = dict(
num_obs=self.env.num_obs,
num_actions=self.env.num_actions,
num_values=self.env.num_values,
)
def build_model(self, config):
model = config.get('model', ActorCriticModel)
config['normalize_obs'] = self.normalize_obs
config['normalize_value'] = self.normalize_value
config['normalize_advantage'] = self.normalize_advantage
config.update(self.env_info)
self.model = model(config).to(self.device)
print(self.model)
def set_eval(self):
self.model.eval()
def set_train(self):
self.model.train()
def preproc_action(self, action):
return action.clone()
def env_step(self, action):
_action = self.preproc_action(action)
obs, reward, done, infos = self.env.step(_action)
obs = obs.to(self.device)
reward = reward.to(self.device)
done = done.to(self.device)
for k in infos.keys():
if isinstance(infos[k], torch.Tensor):
infos[k] = infos[k].to(self.device)
return obs, reward, done, infos
def env_reset_done(self):
obs = self.env.reset_done()
return obs.to(self.device)
def play_steps(self):
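        # roll out horizon_length steps in the vectorized env, then compute returns/advantages
        # and flatten everything into a single training batch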
for n in range(self.horizon_length):
obs = self.env_reset_done()
self.experience_buffer.update_data('obs', n, obs)
value = self.model.get_value(obs)
action, neglogp = self.model.get_action(obs)
obs, reward, done, infos = self.env_step(action)
next_value = self.model.get_value(obs)
self.experience_buffer.update_data('value', n, value)
self.experience_buffer.update_data('action', n, action)
self.experience_buffer.update_data('neglogp', n, neglogp)
self.experience_buffer.update_data('reward', n, reward)
self.experience_buffer.update_data('next_obs', n, obs)
self.experience_buffer.update_data('done', n, done)
self.experience_buffer.update_data('next_value', n, next_value)
self.post_step(n, infos)
mb_done = self.experience_buffer.datas['done']
mb_value = self.experience_buffer.datas['value']
mb_next_value = self.experience_buffer.datas['next_value']
mb_reward = self.experience_buffer.datas['reward']
mb_value, mb_return, mb_adv = self.compute_return(mb_done, mb_value, mb_reward, mb_next_value)
self.experience_buffer.datas['value'] = mb_value
self.experience_buffer.datas['return'] = mb_return
self.experience_buffer.datas['advantage'] = mb_adv
batch_dict = self.experience_buffer.get_data()
return batch_dict
def train_epoch(self):
self.set_eval()
play_time_start = time.time()
batch_dict = self.play_steps()
play_time_end = time.time()
update_time_start = time.time()
self.set_train()
self.curr_frames = self.batch_size
self.dataset.update(batch_dict)
for mini_ep in range(0, self.mini_epochs_num):
for i in range(len(self.dataset)):
self.update(self.dataset[i])
self.post_epoch()
update_time_end = time.time()
play_time = play_time_end - play_time_start
update_time = update_time_end - update_time_start
total_time = update_time_end - play_time_start
return play_time, update_time, total_time
def train(self):
self.last_mean_rewards = -100500
total_time = 0
self.frame = 0
while True:
self.epoch_num += 1
play_time, update_time, epoch_time = self.train_epoch()
total_time += epoch_time
scaled_time = epoch_time
scaled_play_time = play_time
curr_frames = self.curr_frames
self.frame += curr_frames
fps_step = curr_frames / scaled_play_time
fps_total = curr_frames / scaled_time
print(f'fps step: {fps_step:.1f} fps total: {fps_total:.1f}')
if self.save_freq > 0:
if self.epoch_num % self.save_freq == 0:
Runner.save_model('Epoch' + str(self.epoch_num))
if self.epoch_num > self.max_epochs:
print('MAX EPOCHS NUM!')
return
def test(self):
self.set_eval()
score = self.env.test()
print('total profit:', score)
def post_step(self, n, infos):
pass
def post_epoch(self):
Runner.logger.upload()
if self.epoch_num % 10 == 0:
self.env.test()
def compute_return(self, done, value, reward, next_value):
last_gae_lam = 0
adv = torch.zeros_like(reward)
done = done.float()
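        # GAE: sweep backwards through the rollout, accumulating discounted TD residuals (tau acts as lambda)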
for t in reversed(range(self.horizon_length)):
not_done = 1.0 - done[t]
not_done = not_done.unsqueeze(1)
delta = reward[t] + self.gamma * next_value[t] - value[t]
last_gae_lam = delta + self.gamma * self.tau * not_done * last_gae_lam
adv[t] = last_gae_lam
returns = self.model.normalize_value(value + adv)
value = self.model.normalize_value(value)
adv = self.model.preproc_advantage(adv)
return value, returns, adv
def update(self, input_dict):
obs = input_dict['obs']
action = input_dict['action']
old_value = input_dict['value']
old_neglogp = input_dict['neglogp']
advantage = input_dict['advantage']
returns = input_dict['return']
mu = self.model.get_action(obs, train=True)
neglogp = -Categorical(mu).log_prob(action.squeeze(-1))
value = self.model.get_value(obs, train=True)
# print(mu.shape, action.shape)
# print(neglogp.shape)
# print(torch.exp(old_neglogp[0] - neglogp[0]))
a_loss = self._actor_loss(old_neglogp, neglogp, advantage)
c_loss = self._critic_loss(old_value, value, returns)
b_loss = self._bound_loss(mu)
loss = a_loss + self.critic_loss_coef * c_loss + self.bounds_loss_coef * b_loss
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
Runner.logger.log({
'loss/total': loss,
'loss/actor': a_loss,
'loss/critic': c_loss,
'value/': value,
})
def log_results(self, **kwargs):
pass
def _actor_loss(self, old_neglogp, neglogp, advantage):
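        # PPO clipped surrogate objective; the probability ratio pi_new/pi_old is recovered from the stored negative log-probs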
ratio = torch.exp(old_neglogp - neglogp).clamp_max(2) # prevent too large loss
surr1 = advantage * ratio
surr2 = advantage * torch.clamp(ratio, 1.0 - self.e_clip, 1.0 + self.e_clip)
a_loss = torch.max(-surr1, -surr2)
return a_loss.mean()
def _critic_loss(self, old_value, value, return_batch):
if self.clip_value:
value_pred_clipped = old_value + (value - old_value).clamp(-self.e_clip, self.e_clip)
value_losses = (value - return_batch) ** 2
value_losses_clipped = (value_pred_clipped - return_batch)**2
c_loss = torch.max(value_losses, value_losses_clipped)
else:
c_loss = (return_batch - value) ** 2
return c_loss.mean()
def _bound_loss(self, mu):
if self.bounds_loss_coef is not None:
soft_bound = 1.0
mu_loss_high = torch.maximum(mu - soft_bound, torch.tensor(0, device=self.device)) ** 2
mu_loss_low = torch.minimum(mu + soft_bound, torch.tensor(0, device=self.device)) ** 2
b_loss = (mu_loss_low + mu_loss_high).sum(axis=-1)
else:
b_loss = 0
return b_loss.mean()
def save(self):
return self.model.state_dict()
def load(self, datas):
self.model.load_state_dict(datas)
| 10,238 | Python | 33.708474 | 123 | 0.582926 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/runs/Noob/20231116-213147/config.yaml | task:
name: Noob
env:
num_envs: ${resolve_default:1,${...num_envs}}
train_data: ./train.csv
test_data: ${resolve_default:'./test.csv',${...test_data}}
window_size: 10
frame_bound:
- 1850
- 2850
train:
name: PPOAgent
params:
seed: ${...seed}
model:
actor_mlp:
- 256
- 256
critic_mlp:
- 256
- 256
load_checkpoint: ${if:${...checkpoint},True,False}
load_path: ${...checkpoint}
config:
device: ${....rl_device}
save_frequency: 10
normalize_obs: true
normalize_value: false
normalize_advantage: true
horizon_length: 2048
max_epochs: ${resolve_default:200,${....max_iterations}}
mini_epochs: 6
minibatch_size: 512
tau: 0.9
gamma: 0.9
e_clip: 0.2
clip_value: false
learning_rate: 0.001
critic_loss_coef: 1
bounds_loss_coef: 10
grad_penalty_coef: 0
experiment: ''
num_envs: ''
seed: 42
torch_deterministic: false
rl_device: cpu
max_iterations: ''
test: false
checkpoint: ''
headless: false
enable_livestream: false
mt_timeout: 30
render: false
debug: false
wandb: true
save: true
profile: false
test_data: ''
| 1,195 | YAML | 19.271186 | 62 | 0.593305 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/runs/Noob/20231116-213147/codes/actor_critic_model.py | from copy import deepcopy
import torch
from torch import nn
from torch.distributions import Categorical
from .utils import neg_log_p, eval_no_grad, Identity, RunningMeanStd
class Mlp(nn.Module):
def __init__(
self,
in_size, hidden_size, out_size=None,
activation: nn.Module = nn.ReLU(),
output_activation: nn.Module = nn.Identity()
):
super().__init__()
model = []
self.sizes = sizes = [in_size] + hidden_size
for x, y in zip(sizes[:-1], sizes[1:]):
model.append(nn.Linear(x, y))
model.append(deepcopy(activation))
if out_size is not None:
model.append(nn.Linear(sizes[-1], out_size))
self.model = nn.Sequential(*model)
self.out_act = output_activation
def forward(self, x):
return self.out_act(self.model(x))
def set_spectral_norm(self):
for i, layer in enumerate(self.model):
if isinstance(layer, nn.Linear):
self.model[i] = nn.utils.spectral_norm(layer)
class ActorCriticModel(nn.Module):
def __init__(self, config):
super().__init__()
self.obs_size = config['num_obs']
self.action_size = config['num_actions']
self.value_size = config['num_values']
self.actor = self.Actor(self.obs_size, config['actor_mlp'], self.action_size)
self.critic = self.Critic(self.obs_size, config['critic_mlp'], self.value_size)
normalize = lambda x: (x - x.mean()) / (x.std() + 1e-8)
self.normalize_obs = RunningMeanStd(self.obs_size) if config['normalize_obs'] else Identity()
self.normalize_value = RunningMeanStd(self.value_size) if config['normalize_value'] else Identity()
self.normalize_advantage = normalize if config['normalize_advantage'] else Identity()
self.preproc_advantage = lambda x: self.normalize_advantage(x.mean(dim=-1))
class Actor(nn.Module):
def __init__(self, obs_size, mlp_size, action_size):
super().__init__()
            self.mu = Mlp(obs_size, mlp_size, 9, output_activation=nn.Softmax(dim=-1))  # 9 discrete actions
def forward(self, x):
return self.mu(x)
class Critic(nn.Module):
def __init__(self, obs_size, mlp_size, value_size):
super().__init__()
self.value = Mlp(obs_size, mlp_size, value_size)
def forward(self, x):
return self.value(x)
@eval_no_grad
def get_action(self, obs, train=False, test=False):
obs = self.normalize_obs(obs)
mu = self.actor(obs)
if train:
return mu
elif test:
return torch.argmax(mu, dim=-1)
else:
action_dist = Categorical(mu)
action = action_dist.sample()
return action, -action_dist.log_prob(action)
@eval_no_grad
def get_value(self, obs, train=False):
obs = self.normalize_obs(obs)
value = self.critic(obs)
if train:
return value
else:
return self.normalize_value(value, unnorm=True)
| 3,072 | Python | 33.144444 | 107 | 0.58724 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/runs/Noob/20231116-213147/codes/replay_buffer.py | import torch
class ReplayBuffer():
def __init__(self, buffer_size, device):
self._head = 0
self._total_count = 0
self._buffer_size = buffer_size
self._device = device
self._data_buf = None
self._sample_idx = torch.randperm(buffer_size)
self._sample_head = 0
return
def reset(self):
self._head = 0
self._total_count = 0
self._reset_sample_idx()
return
def get_buffer_size(self):
return self._buffer_size
def get_total_count(self):
return self._total_count
def store(self, data_dict):
if (self._data_buf is None):
self._init_data_buf(data_dict)
n = next(iter(data_dict.values())).shape[0]
buffer_size = self.get_buffer_size()
assert(n < buffer_size)
for key, curr_buf in self._data_buf.items():
curr_n = data_dict[key].shape[0]
assert(n == curr_n)
store_n = min(curr_n, buffer_size - self._head)
curr_buf[self._head:(self._head + store_n)] = data_dict[key][:store_n]
remainder = n - store_n
if (remainder > 0):
curr_buf[0:remainder] = data_dict[key][store_n:]
self._head = (self._head + n) % buffer_size
self._total_count += n
return
def sample(self, n):
total_count = self.get_total_count()
buffer_size = self.get_buffer_size()
if self.is_empty():
return None
idx = torch.arange(self._sample_head, self._sample_head + n)
idx = idx % buffer_size
rand_idx = self._sample_idx[idx]
if (total_count < buffer_size):
rand_idx = rand_idx % self._head
samples = dict()
for k, v in self._data_buf.items():
samples[k] = v[rand_idx]
self._sample_head += n
if (self._sample_head >= buffer_size):
self._reset_sample_idx()
return samples
def _reset_sample_idx(self):
buffer_size = self.get_buffer_size()
self._sample_idx[:] = torch.randperm(buffer_size)
self._sample_head = 0
return
def _init_data_buf(self, data_dict):
buffer_size = self.get_buffer_size()
self._data_buf = dict()
for k, v in data_dict.items():
v_shape = v.shape[1:]
self._data_buf[k] = torch.zeros((buffer_size,) + v_shape, device=self._device)
return
def is_empty(self):
return self._total_count == 0
class ReplayBufferCPU(ReplayBuffer):
def __init__(self, buffer_size, device):
self.sample_device = device
super().__init__(buffer_size, device='cpu')
def sample(self, n):
x = super().sample(n)
if x is not None:
for k in x.keys():
x[k] = x[k].to(self.sample_device)
return x
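# Illustrative sketch (not part of the original code): storing transitions and
# drawing a random minibatch. Shapes and sizes are made up for the example.
def _replay_buffer_usage_example():
    buf = ReplayBuffer(buffer_size=128, device='cpu')
    batch = {'obs': torch.randn(32, 4), 'action': torch.randn(32, 2)}
    buf.store(batch)          # buffers are lazily allocated from the first stored batch
    sample = buf.sample(16)   # dict with 'obs' of shape (16, 4) and 'action' of shape (16, 2)
    return sample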
| 2,897 | Python | 26.339622 | 90 | 0.534001 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/runs/Noob/20231116-213147/codes/pg_agent.py | import torch
from .ppo_agent import PPOAgent
torch.autograd.set_detect_anomaly(True)
class PGAgent(PPOAgent):
    def _actor_loss(self, _, neglogp, advantage):
        return (neglogp * advantage).sum()
def _critic_loss(self, old_value, value, return_batch):
return 0
| 278 | Python | 20.461537 | 59 | 0.679856 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/runs/Noob/20231116-213147/codes/experience.py | import gym
import torch
import numpy as np
class ExperienceBuffer:
def __init__(self, shape, env_info, device):
self.shape = tuple(shape)
self.num_obs = env_info['num_obs']
self.num_actions = env_info['num_actions']
self.num_values = env_info['num_values']
self.device = device
self.datas = {}
self.create_buffer()
def create_buffer(self):
self.add_buffer('obs', self.num_obs)
self.add_buffer('reward', self.num_values)
self.add_buffer('return', self.num_values)
self.add_buffer('value', self.num_values)
self.add_buffer('action', self.num_actions)
self.add_buffer('neglogp')
self.add_buffer('done', dtype=torch.long)
self.add_buffer('next_obs', self.num_obs)
self.add_buffer('next_value', self.num_values)
# def create_buffer(self):
# self.datas['obs'] = torch.zeros([*self.shape, self.num_obs], device=self.device)
# self.datas['reward'] = torch.zeros([*self.shape, self.num_values], device=self.device)
# self.datas['return'] = torch.zeros([*self.shape, self.num_values], device=self.device)
# self.datas['value'] = torch.zeros([*self.shape, self.num_values], device=self.device)
# self.datas['action'] = torch.zeros([*self.shape, self.num_actions], device=self.device)
# self.datas['neglogp'] = torch.zeros([*self.shape], device=self.device)
# self.datas['done'] = torch.zeros([*self.shape], dtype=torch.long, device=self.device)
# self.datas['next_obs'] = torch.zeros([*self.shape, self.num_obs], device=self.device)
# self.datas['next_value'] = torch.zeros([*self.shape, self.num_values], device=self.device)
def add_buffer(self, name, shape=(), dtype=torch.float):
shape = (shape,) if isinstance(shape, int) else tuple(shape)
self.datas[name] = torch.zeros(self.shape + shape, dtype=dtype, device=self.device)
def update_data(self, *args, **kwargs):
raise NotImplementedError
def get_data(self, *args, **kwargs):
raise NotImplementedError
class VecEnvExperienceBuffer(ExperienceBuffer):
def update_data(self, key, idx, value):
self.datas[key][idx] = value
def get_data(self):
batch_dict = {}
for k, v in self.datas.items():
s = v.shape
batch_dict[k] = v.transpose(0, 1).reshape(s[0] * s[1], *s[2:])
return batch_dict
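# Illustrative sketch (not part of the original code): the rollout buffer stores
# tensors as [horizon, num_actors, ...] and get_data() flattens them to
# [horizon * num_actors, ...] for minibatching. Sizes below are made up.
def _vec_env_buffer_example():
    info = {'num_obs': 4, 'num_actions': 1, 'num_values': 1}
    buf = VecEnvExperienceBuffer([2, 3], info, 'cpu')   # horizon=2, actors=3
    buf.update_data('obs', 0, torch.ones(3, 4))
    buf.update_data('obs', 1, torch.zeros(3, 4))
    batch = buf.get_data()
    return batch['obs'].shape                           # torch.Size([6, 4])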
class AsyncExperienceBuffer(ExperienceBuffer):
def __init__(self, num_actors, env_info, max_size, device):
super().__init__([max_size * 2], env_info, device)
self.size = max_size
self.run_idx = torch.zeros([num_actors], dtype=torch.long, device=self.device)
def create_buffer(self):
super().create_buffer()
self.status = torch.zeros(self.shape, dtype=torch.long, device=self.device)
self.datas['steps'] = torch.zeros([*self.shape], dtype=torch.long, device=self.device)
def update_data(self, **kwargs):
raise NotImplementedError
def pre_update_data(self, env_ids, datas: dict):
idx = (self.status == 0).nonzero().squeeze(-1)[:len(env_ids)]
self.run_idx[env_ids] = idx
for k, v in datas.items():
self.datas[k][idx] = v
self.status[idx] = -1
def post_update_data(self, env_ids, datas: dict):
idx = self.run_idx[env_ids]
for k, v in datas.items():
self.datas[k][idx] = v
self.status[self.status > 0] += 1
self.status[idx] = 1
        # TODO: check whether this is needed
        if 'steps' in datas:
            self.status[idx[datas['steps'] <= 0]] = 0
def full(self):
return torch.sum(self.status > 0) >= self.size
def get_data(self):
if not self.full():
raise
idx = self.status.topk(self.size, sorted=False)[1]
data = {k: v[idx] for k, v in self.datas.items()}
self.status[idx] = 0
return data
if __name__ == '__main__':
T = torch.Tensor
TL = lambda x: T(x).to(dtype=torch.long)
Z = torch.zeros
R = torch.rand
    env_info = {'num_actions': 2, 'num_obs': 3, 'num_values': 1}  # keys expected by ExperienceBuffer
buf = AsyncExperienceBuffer(5, env_info, 5, 'cpu')
buf.pre_update_data(TL([1, 3]), {'obs': T([[1, 1, 1], [2, 2, 2]])})
buf.pre_update_data(TL([2, 0]), {'obs': T([[3, 3, 3], [4, 4, 4]])})
buf.post_update_data(TL([2, 0]), {'action': T([[3, 3], [4, 4]])})
buf.post_update_data(TL([1, 3]), {'action': T([[1, 1], [2, 2]])})
buf.pre_update_data(TL([2, 0]), {'obs': T([[3, 3, 3], [4, 4, 4]])})
buf.post_update_data(TL([2, 0]), {'action': T([[3, 3], [4, 4]])})
print(buf.run_idx)
print(buf.datas['obs'], buf.datas['action'])
print(buf.status)
print(buf.get_data())
print(buf.status)
| 4,782 | Python | 38.204918 | 100 | 0.587411 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/runs/Noob/20231116-213147/codes/utils.py | import numpy as np
import torch
from torch import nn
from utils.torch_utils import to_torch_size
def eval_no_grad(func):
def _eval_no_grad(self, *args, **kwargs):
if not self.training:
with torch.no_grad():
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return _eval_no_grad
class Identity(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, x, **kwargs):
return x
def neg_log_p(x, mean, log_std):
return 0.5 * (((x - mean) / torch.exp(log_std)) ** 2).sum(dim=-1) \
+ 0.5 * np.log(2.0 * np.pi) * x.size()[-1] \
+ log_std.sum(dim=-1)
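# Sanity-check sketch (not part of the original code): neg_log_p above is the negative
# log-likelihood of a diagonal Gaussian, so it matches torch.distributions.Normal.
def _neg_log_p_check():
    from torch.distributions import Normal
    x, mean, log_std = torch.randn(5, 3), torch.zeros(5, 3), torch.zeros(5, 3)
    a = neg_log_p(x, mean, log_std)
    b = -Normal(mean, log_std.exp()).log_prob(x).sum(dim=-1)
    return torch.allclose(a, b, atol=1e-5)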
class RunningMeanStd(nn.Module):
def __init__(self, in_size, eps=1e-05):
super().__init__()
self.in_size = to_torch_size(in_size)
self.eps = eps
self.register_buffer("mean", torch.zeros(in_size, dtype=torch.float64))
self.register_buffer("var", torch.ones(in_size, dtype=torch.float64))
self.register_buffer("count", torch.ones((), dtype=torch.float64))
def _update(self, batch_mean, batch_var, batch_count):
delta = batch_mean - self.mean
m_a = self.var * self.count
m_b = batch_var * batch_count
m2 = m_a + m_b + delta**2 * self.count * batch_count / (self.count + batch_count)
self.count += batch_count
self.mean[:] = self.mean + delta * batch_count / self.count
self.var[:] = m2 / self.count
def forward(self, x, unnorm=False):
if x.nelement() == 0:
return x
if self.training and not unnorm:
axis = list(range(x.ndim - len(self.in_size)))
mean = x.mean(axis)
var = x.var(axis, correction=0)
count = x.shape[:-1].numel()
self._update(mean, var, count)
if unnorm:
y = torch.clamp(x, min=-5.0, max=5.0)
y = torch.sqrt(self.var.float() + self.eps) * y + self.mean.float()
else:
y = (x - self.mean.float()) / torch.sqrt(self.var.float() + self.eps)
y = torch.clamp(y, min=-5.0, max=5.0)
return y
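# Illustrative sketch (not part of the original code): RunningMeanStd updates its running
# statistics while the module is in train() mode and can also undo the normalization.
def _running_mean_std_example():
    rms = RunningMeanStd(3).train()
    x = torch.randn(100, 3) * 4.0 + 2.0
    y = rms(x)                    # normalized (statistics are updated as a side effect)
    x_back = rms(y, unnorm=True)  # approximately recovers x (values are clamped to +/- 5 std)
    return x_back.shape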
| 2,193 | Python | 29.472222 | 89 | 0.545372 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/runs/Noob/20231116-213147/codes/dataset.py | import torch
class Dataset:
def __init__(self, batch_size, minibatch_size, device):
self.batch_size = batch_size
self.minibatch_size = minibatch_size
self.device = device
# self.size = self.batch_size // self.minibatch_size
self._idx_buf = torch.randperm(batch_size)
def update(self, datas):
self.datas = datas
def __len__(self):
return self.batch_size // self.minibatch_size
def __getitem__(self, idx):
start = idx * self.minibatch_size
end = (idx + 1) * self.minibatch_size
sample_idx = self._idx_buf[start:end]
data_dict = {}
for k, v in self.datas.items():
if v is not None:
data_dict[k] = v[sample_idx].detach()
if end >= self.batch_size:
self._shuffle_idx_buf()
return data_dict
def _shuffle_idx_buf(self):
self._idx_buf[:] = torch.randperm(self.batch_size)
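# Illustrative sketch (not part of the original code): the agent refreshes the dataset
# with one rollout batch and then iterates fixed-size, randomly ordered minibatches.
def _dataset_usage_example():
    data = {'obs': torch.randn(8, 4), 'advantage': torch.randn(8)}
    ds = Dataset(batch_size=8, minibatch_size=4, device='cpu')
    ds.update(data)
    shapes = [ds[i]['obs'].shape for i in range(len(ds))]
    return shapes                 # [torch.Size([4, 4]), torch.Size([4, 4])]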
| 969 | Python | 26.714285 | 60 | 0.55934 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/runs/Noob/20231116-213147/codes/ppo_agent.py | import os
import shutil
import time
import torch
from torch import optim
from torch.distributions import Categorical
from .utils import neg_log_p
from .dataset import Dataset
from .experience import VecEnvExperienceBuffer
from .actor_critic_model import ActorCriticModel
from utils.runner import Runner
torch.autograd.set_detect_anomaly(True)  # debugging aid: makes autograd errors easier to trace but slows training
class PPOAgent:
def __init__(self, params, env):
print(f'\n------------------------------------ {self.__class__.__name__} ------------------------------------')
self.config = config = params['config']
self.device = config.get('device', 'cuda:0')
# save
self.save_freq = config.get('save_frequency', 0)
# normalize
self.normalize_obs = self.config['normalize_obs']
self.normalize_value = self.config.get('normalize_value', False)
self.normalize_advantage = config['normalize_advantage']
# learning
self.lr = config['learning_rate']
self.num_actors = env.num_envs
self.horizon_length = config['horizon_length']
self.seq_len = self.config.get('seq_length', 4)
self.max_epochs = self.config.get('max_epochs', -1)
self.mini_epochs_num = self.config['mini_epochs']
self.minibatch_size = self.config.get('minibatch_size')
self.batch_size = self.horizon_length * self.num_actors
assert (self.batch_size % self.minibatch_size == 0)
self.e_clip = config['e_clip']
self.clip_action = self.config.get('clip_actions', True)
self.clip_value = config['clip_value']
self.tau = self.config['tau']
self.gamma = self.config['gamma']
self.critic_loss_coef = config['critic_loss_coef']
self.bounds_loss_coef = self.config.get('bounds_loss_coef', None)
# env
self.env = env
self.build_env_info()
# model
self.build_model(params['model'])
self.optimizer = optim.AdamW(self.model.parameters(), self.lr, eps=1e-08, weight_decay=0)
# buffers
self.dataset = Dataset(self.batch_size, self.minibatch_size, self.device)
self.experience_buffer = VecEnvExperienceBuffer([self.horizon_length, self.num_actors], self.env_info, self.device)
# counter
self.epoch_num = 0
self.env.agent = self
def build_env_info(self):
self.env_info = dict(
num_obs=self.env.num_obs,
num_actions=self.env.num_actions,
num_values=self.env.num_values,
)
def build_model(self, config):
model = config.get('model', ActorCriticModel)
config['normalize_obs'] = self.normalize_obs
config['normalize_value'] = self.normalize_value
config['normalize_advantage'] = self.normalize_advantage
config.update(self.env_info)
self.model = model(config).to(self.device)
print(self.model)
def set_eval(self):
self.model.eval()
def set_train(self):
self.model.train()
def preproc_action(self, action):
return action.clone()
def env_step(self, action):
_action = self.preproc_action(action)
obs, reward, done, infos = self.env.step(_action)
obs = obs.to(self.device)
reward = reward.to(self.device)
done = done.to(self.device)
for k in infos.keys():
if isinstance(infos[k], torch.Tensor):
infos[k] = infos[k].to(self.device)
return obs, reward, done, infos
def env_reset_done(self):
obs = self.env.reset_done()
return obs.to(self.device)
def play_steps(self):
for n in range(self.horizon_length):
obs = self.env_reset_done()
self.experience_buffer.update_data('obs', n, obs)
value = self.model.get_value(obs)
action, neglogp = self.model.get_action(obs)
obs, reward, done, infos = self.env_step(action)
next_value = self.model.get_value(obs)
self.experience_buffer.update_data('value', n, value)
self.experience_buffer.update_data('action', n, action)
self.experience_buffer.update_data('neglogp', n, neglogp)
self.experience_buffer.update_data('reward', n, reward)
self.experience_buffer.update_data('next_obs', n, obs)
self.experience_buffer.update_data('done', n, done)
self.experience_buffer.update_data('next_value', n, next_value)
self.post_step(n, infos)
mb_done = self.experience_buffer.datas['done']
mb_value = self.experience_buffer.datas['value']
mb_next_value = self.experience_buffer.datas['next_value']
mb_reward = self.experience_buffer.datas['reward']
mb_value, mb_return, mb_adv = self.compute_return(mb_done, mb_value, mb_reward, mb_next_value)
self.experience_buffer.datas['value'] = mb_value
self.experience_buffer.datas['return'] = mb_return
self.experience_buffer.datas['advantage'] = mb_adv
batch_dict = self.experience_buffer.get_data()
return batch_dict
def train_epoch(self):
self.set_eval()
play_time_start = time.time()
batch_dict = self.play_steps()
play_time_end = time.time()
update_time_start = time.time()
self.set_train()
self.curr_frames = self.batch_size
self.dataset.update(batch_dict)
for mini_ep in range(0, self.mini_epochs_num):
for i in range(len(self.dataset)):
self.update(self.dataset[i])
self.post_epoch()
update_time_end = time.time()
play_time = play_time_end - play_time_start
update_time = update_time_end - update_time_start
total_time = update_time_end - play_time_start
return play_time, update_time, total_time
def train(self):
self.last_mean_rewards = -100500
total_time = 0
self.frame = 0
while True:
self.epoch_num += 1
play_time, update_time, epoch_time = self.train_epoch()
total_time += epoch_time
scaled_time = epoch_time
scaled_play_time = play_time
curr_frames = self.curr_frames
self.frame += curr_frames
fps_step = curr_frames / scaled_play_time
fps_total = curr_frames / scaled_time
print(f'fps step: {fps_step:.1f} fps total: {fps_total:.1f}')
if self.save_freq > 0:
if self.epoch_num % self.save_freq == 0:
Runner.save_model('Epoch' + str(self.epoch_num))
            if 0 < self.max_epochs < self.epoch_num:
print('MAX EPOCHS NUM!')
return
def test(self):
self.set_eval()
score = self.env.test()
print('total profit:', score)
def post_step(self, n, infos):
pass
def post_epoch(self):
Runner.logger.upload()
if self.epoch_num % 10 == 0:
self.env.test()
    def compute_return(self, done, value, reward, next_value):
        # Generalized Advantage Estimation (GAE): walk the rollout backwards and
        # accumulate discounted TD errors; done flags cut the accumulation between episodes.
        last_gae_lam = 0
        adv = torch.zeros_like(reward)
        done = done.float()
        for t in reversed(range(self.horizon_length)):
            not_done = 1.0 - done[t]
            not_done = not_done.unsqueeze(1)
            # one-step TD error; next_value is bootstrapped even on done steps (time-limit style)
            delta = reward[t] + self.gamma * next_value[t] - value[t]
            last_gae_lam = delta + self.gamma * self.tau * not_done * last_gae_lam
            adv[t] = last_gae_lam
returns = self.model.normalize_value(value + adv)
value = self.model.normalize_value(value)
adv = self.model.preproc_advantage(adv)
return value, returns, adv
def update(self, input_dict):
obs = input_dict['obs']
action = input_dict['action']
old_value = input_dict['value']
old_neglogp = input_dict['neglogp']
advantage = input_dict['advantage']
returns = input_dict['return']
mu = self.model.get_action(obs, train=True)
neglogp = -Categorical(mu).log_prob(action.squeeze(-1))
value = self.model.get_value(obs, train=True)
# print(mu.shape, action.shape)
# print(neglogp.shape)
# print(torch.exp(old_neglogp[0] - neglogp[0]))
a_loss = self._actor_loss(old_neglogp, neglogp, advantage)
c_loss = self._critic_loss(old_value, value, returns)
b_loss = self._bound_loss(mu)
        loss = a_loss + self.critic_loss_coef * c_loss + (self.bounds_loss_coef or 0.0) * b_loss
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
Runner.logger.log({
'loss/total': loss,
'loss/actor': a_loss,
'loss/critic': c_loss,
'value/': value,
})
def log_results(self, **kwargs):
pass
def _actor_loss(self, old_neglogp, neglogp, advantage):
        ratio = torch.exp(old_neglogp - neglogp).clamp_max(2)  # clamp the ratio to avoid excessively large updates
surr1 = advantage * ratio
surr2 = advantage * torch.clamp(ratio, 1.0 - self.e_clip, 1.0 + self.e_clip)
a_loss = torch.max(-surr1, -surr2)
return a_loss.mean()
def _critic_loss(self, old_value, value, return_batch):
if self.clip_value:
value_pred_clipped = old_value + (value - old_value).clamp(-self.e_clip, self.e_clip)
value_losses = (value - return_batch) ** 2
value_losses_clipped = (value_pred_clipped - return_batch)**2
c_loss = torch.max(value_losses, value_losses_clipped)
else:
c_loss = (return_batch - value) ** 2
return c_loss.mean()
def _bound_loss(self, mu):
if self.bounds_loss_coef is not None:
soft_bound = 1.0
mu_loss_high = torch.maximum(mu - soft_bound, torch.tensor(0, device=self.device)) ** 2
mu_loss_low = torch.minimum(mu + soft_bound, torch.tensor(0, device=self.device)) ** 2
b_loss = (mu_loss_low + mu_loss_high).sum(axis=-1)
else:
            b_loss = torch.zeros(1, device=self.device)  # keep a tensor so .mean() below is valid
return b_loss.mean()
def save(self):
return self.model.state_dict()
def load(self, datas):
self.model.load_state_dict(datas)
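# Illustrative sketch (not part of the original code): the advantage recursion used in
# compute_return above, written out for a single environment. gamma and tau (the GAE
# lambda) correspond to the hyper-parameters read from the config.
def _gae_example(reward, value, next_value, done, gamma=0.99, tau=0.95):
    adv = torch.zeros_like(reward)
    last_gae_lam = 0.0
    for t in reversed(range(len(reward))):
        not_done = 1.0 - done[t]
        delta = reward[t] + gamma * next_value[t] - value[t]
        last_gae_lam = delta + gamma * tau * not_done * last_gae_lam
        adv[t] = last_gae_lam
    return adv, adv + value   # advantages and returns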
| 10,238 | Python | 33.708474 | 123 | 0.582926 |
BeanSamuel/Exchange-Rate-Prediction-RL/utils/torch_utils.py | from typing import Optional, Sequence
import torch
def to_torch_size(*size) -> torch.Size:
if len(size) == 1 and isinstance(size[0], Sequence):
torch_size = size[0]
else:
torch_size = list(size)
return torch.Size(torch_size)
| 255 | Python | 22.272725 | 56 | 0.647059 |
BeanSamuel/Exchange-Rate-Prediction-RL/utils/wandb_logger.py | import torch
import wandb
# class WandbLogger:
# def __init__(self, project, run_name, log=True):
# self.data = {}
# self.data_cnt = {}
# self.is_log = log
# if log:
# wandb.init(project=project)
# wandb.run.name = run_name
# wandb.run.save()
# def stop(self):
# wandb.finish()
# def log(self, datas: dict):
# if self.is_log:
# for k, v in datas.items():
# if isinstance(v, torch.Tensor):
# if v.nelement == 0:
# v = torch.nan
# v = v.mean().item()
# n = self.data_cnt.get(k, 0)
# x = self.data.get(k, 0)
# self.data_cnt[k] = n + 1
# self.data[k] = x * n / (n+1) + v / (n+1)
# def upload(self):
# if self.is_log:
# wandb.log(self.data)
# self.data.clear()
# self.data_cnt.clear()
class WandbLogger:
def __init__(self, project, run_name, log=True):
pass
def log(self, datas: dict):
pass
def upload(self):
pass
def stop(self):
pass | 1,176 | Python | 25.155555 | 58 | 0.443027 |
BeanSamuel/Exchange-Rate-Prediction-RL/utils/materials.py | import numpy as np
from pxr import Sdf
from omni.isaac.core.materials import omni_pbr
class OmniPBR(omni_pbr.OmniPBR):
def __init__(self, name, prim_path=None, color: list = None, opacity=None, reflection=None):
if prim_path is None:
prim_path = '/World/Looks/' + name
super().__init__(prim_path, name, color=color)
if reflection is not None:
self.set_reflection_roughness(1 - reflection)
if opacity is not None:
self.set_opacity(opacity)
def set_opacity(self, value: float):
enable_opacity = value < 1
if self.shaders_list[0].GetInput("enable_opacity").Get() is None:
self.shaders_list[0].CreateInput("enable_opacity", Sdf.ValueTypeNames.Bool).Set(enable_opacity)
else:
self.shaders_list[0].GetInput("enable_opacity").Set(enable_opacity)
if self.shaders_list[0].GetInput("opacity_constant").Get() is None:
self.shaders_list[0].CreateInput("opacity_constant", Sdf.ValueTypeNames.Float).Set(value)
else:
self.shaders_list[0].GetInput("opacity_constant").Set(value)
def set_color(self, color) -> None:
super().set_color(np.array(color))
| 1,216 | Python | 39.566665 | 107 | 0.63898 |
BeanSamuel/Exchange-Rate-Prediction-RL/utils/demo_util.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def initialize_demo(config, env, init_sim=True):
from omniisaacgymenvs.demos.anymal_terrain import AnymalTerrainDemo
# Mappings from strings to environments
task_map = {
"AnymalTerrain": AnymalTerrainDemo,
}
from omniisaacgymenvs.utils.config_utils.sim_config import SimConfig
sim_config = SimConfig(config)
cfg = sim_config.config
task = task_map[cfg["task_name"]](
name=cfg["task_name"], sim_config=sim_config, env=env
)
env.set_task(task=task, sim_params=sim_config.get_physics_params(), backend="torch", init_sim=init_sim)
return task | 2,167 | Python | 44.166666 | 107 | 0.757268 |
BeanSamuel/Exchange-Rate-Prediction-RL/utils/runner.py | import os
import time
import torch
import shutil
import random
import numpy as np
from datetime import datetime
from utils.hydra_cfg.hydra_utils import *
from utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
from utils.wandb_logger import WandbLogger
class StopException(Exception):
pass
class _Runner:
def __init__(self):
pass
def init(self, cfg):
self.cfg_dict = omegaconf_to_dict(cfg)
self.test = cfg.test
self.checkpoint = cfg.checkpoint
self.clear_cmd()
self.task_name = cfg.task.name
self.start_time = datetime.now().strftime('%Y%m%d-%H%M%S')
# create save dir
self.save = cfg.save
self.run_name = self.start_time
self.task_dir = os.path.join('./runs', self.task_name)
if self.save:
self.run_dir = os.path.join(self.task_dir, self.run_name)
os.makedirs(self.run_dir, exist_ok=True)
# set seed
        cfg.seed = 42  # fixed seed for reproducibility; overrides any value from the config
torch.manual_seed(cfg.seed)
torch.cuda.manual_seed_all(cfg.seed)
np.random.seed(cfg.seed)
random.seed(cfg.seed)
# logger
self.logger = WandbLogger(self.task_name, self.start_time, cfg.wandb)
# backup code
if self.save:
code_path = './learning'
if code_path is not None:
shutil.copytree(code_path, os.path.join(self.run_dir, 'codes'))
# dump config dict
if self.save:
with open(os.path.join(self.run_dir, 'config.yaml'), 'w') as f:
f.write(OmegaConf.to_yaml(cfg))
# get env & agent
from utils.task_util import get_env_agent
self.env, self.agent = get_env_agent(self.cfg_dict)
if self.test:
if self.checkpoint == '':
self.checkpoint = max(os.listdir(self.task_dir))
# load checkpoint
if self.checkpoint:
self.load_model(self.checkpoint)
if cfg.render:
self.write_cmd('render')
def run(self):
try:
if self.test:
self.agent.test()
else:
self.agent.train()
self.stop()
except StopException:
pass
def stop(self):
self.save_model('FinalEpoch')
self.logger.stop()
raise StopException
def read_cmd(self):
try:
with open('./controller', 'r') as f:
return f.read().rstrip()
except:
return ''
def write_cmd(self, cmd):
try:
with open('./controller', 'w') as f:
return f.write(cmd)
except:
pass
def clear_cmd(self):
open('./controller', 'w').close()
def close(self):
pass
def control(self):
cmd = self.read_cmd()
if cmd == 'save':
self.clear_cmd()
self.save_model(f'Epoch{self.agent.epoch_num}')
elif cmd == 'stop':
self.stop()
elif cmd == 'record':
self.clear_cmd()
self.env.record(f'Epoch{self.agent.epoch_num}')
elif cmd == 'close':
self.stop()
self.close()
self.env.render = cmd == 'render'
def get_save_dir(self, sub_dir, epoch_dir=False):
if epoch_dir:
save_dir = os.path.join(self.run_dir, sub_dir, f'Epoch{self.agent.epoch_num}')
else:
save_dir = os.path.join(self.run_dir, sub_dir)
os.makedirs(save_dir, exist_ok=True)
return save_dir
def save_model(self, name):
if self.save:
path = os.path.join(self.get_save_dir('model'), name)
torch.save({'agent': self.agent.save(), 'env': self.env.save()}, path)
print(f'Save model to {path}')
def load_model(self, name, epoch=None):
epoch = 'FinalEpoch' if epoch is None else f'Epoch{epoch}'
model_dir = os.path.join(self.task_dir, name, 'model', epoch)
datas = torch.load(model_dir)
self.agent.load(datas['agent'])
self.env.load(datas['env'])
Runner = _Runner()
| 4,120 | Python | 26.291391 | 90 | 0.546359 |
BeanSamuel/Exchange-Rate-Prediction-RL/utils/rotation_utils.py | import sys
from torch.autograd import Variable
import torch.distributed.algorithms
sys.path.append('/home/hardy/.local/share/ov/pkg/isaac_sim-2022.2.1/exts/omni.isaac.core')
import numpy as np
from numpy import pi, sin, cos
import plotly.express as px
import plotly.io as pio
from utils.torch_utils import *
pio.renderers.default = "browser"
# auto-shaping
def ash(func, x, in_size):
shape = x.shape[:-1]
return func(x.view(shape + (-1, in_size))).view(shape + (-1,))
@torch.jit.script
def normalize(x, eps: float = 1e-9):
return x / x.norm(p=2, dim=-1).clamp(min=eps, max=None).unsqueeze(-1)
@torch.jit.script
def normalize_angle(x):
return torch.atan2(torch.sin(x), torch.cos(x))
def rad2deg(radian_value, device=None):
return torch.rad2deg(radian_value).float().to(device)
def deg2rad(degree_value, device=None):
return torch.deg2rad(degree_value).float().to(device)
def zero_pos(shape, device=None):
return torch.zeros(to_torch_size(shape) + (3,), device=device)
def zero_pos_like(x):
return zero_pos(x.shape[:-1], x.device)
def full_pos(shape, value, device=None):
x = torch.zeros(to_torch_size(shape) + (3,), device=device)
x[:] = torch.tensor(value, device=device)
return x
def full_pos_like(x, value):
return full_pos(x.shape[:-1], value, x.device)
def identity_quat(shape, device=None):
q = torch.zeros(to_torch_size(shape) + (4,), device=device)
q[..., 0] = 1
return q
def identity_quat_like(x):
return identity_quat(x.shape[:-1], x.device)
@torch.jit.script
def quat_unit(a):
return normalize(a)
# @torch.jit.script
# def quat_mul_unnorm(a, b):
# shape = a.shape
# a = a.reshape(-1, 4)
# b = b.reshape(-1, 4)
#
# w1, x1, y1, z1 = a[:, 0], a[:, 1], a[:, 2], a[:, 3]
# w2, x2, y2, z2 = b[:, 0], b[:, 1], b[:, 2], b[:, 3]
# ww = (z1 + x1) * (x2 + y2)
# yy = (w1 - y1) * (w2 + z2)
# zz = (w1 + y1) * (w2 - z2)
# xx = ww + yy + zz
# qq = 0.5 * (xx + (z1 - x1) * (x2 - y2))
# w = qq - ww + (z1 - y1) * (y2 - z2)
# x = qq - xx + (x1 + w1) * (x2 + w2)
# y = qq - yy + (w1 - x1) * (y2 + z2)
# z = qq - zz + (z1 + y1) * (w2 - x2)
# quat = torch.stack([w, x, y, z], dim=-1).view(shape)
#
# return quat
# @torch.jit.script
# def quat_inverse(a):
# shape = a.shape
# a = a.reshape(-1, 4)
# return torch.cat((a[..., 0:1], -a[..., 1:]), dim=-1).view(shape)
@torch.jit.script
def quat_mul_unnorm(a, b):
w1, x1, y1, z1 = a[..., 0], a[..., 1], a[..., 2], a[..., 3]
w2, x2, y2, z2 = b[..., 0], b[..., 1], b[..., 2], b[..., 3]
ww = (z1 + x1) * (x2 + y2)
yy = (w1 - y1) * (w2 + z2)
zz = (w1 + y1) * (w2 - z2)
xx = ww + yy + zz
qq = 0.5 * (xx + (z1 - x1) * (x2 - y2))
w = qq - ww + (z1 - y1) * (y2 - z2)
x = qq - xx + (x1 + w1) * (x2 + w2)
y = qq - yy + (w1 - x1) * (y2 + z2)
z = qq - zz + (z1 + y1) * (w2 - x2)
quat = torch.stack([w, x, y, z], dim=-1)
return quat
@torch.jit.script
def quat_inverse(a):
a = a.clone()
a[..., 1:] *= -1
return a
@torch.jit.script
def quat_rotate(q, v):
q_w = q[..., 0:1]
q_vec = q[..., 1:]
a = v * (2.0 * q_w ** 2 - 1.0)
b = torch.cross(q_vec, v, dim=-1) * q_w * 2.0
c = q_vec * torch.sum(q_vec * v, dim=-1, keepdim=True) * 2.0
return a + b + c
@torch.jit.script
def quat_rotate_inverse(q, v):
q_w = q[..., 0].unsqueeze(-1)
q_vec = q[..., 1:]
a = v * (2.0 * q_w ** 2 - 1.0)
b = torch.cross(q_vec, v, dim=-1) * q_w * 2.0
c = q_vec * torch.sum(q_vec * v, dim=-1, keepdim=True) * 2.0
return a - b + c
@torch.jit.script
def quat_mul(q0, q1):
return quat_unit(quat_mul_unnorm(q0, q1))
@torch.jit.script
def quat_div(x, y):
return quat_mul(quat_inverse(y), x)
@torch.jit.script
def quat_diff_rad(a, b):
eps = 1e-5
b_conj = quat_inverse(b)
mul = quat_mul_unnorm(a, b_conj)
# 2 * torch.acos(torch.abs(mul[..., -1]))
return 2.0 * torch.asin(torch.clamp(torch.norm(mul[..., 1:], p=2, dim=-1), max=1-eps, min=eps-1))
@torch.jit.script
def quat_to_angle_axis(q):
# computes axis-angle representation from quaternion q
# q must be normalized
min_theta = 1e-5
qw, qx, qy, qz = 0, 1, 2, 3
sin_theta = torch.sqrt(1 - q[..., qw] * q[..., qw])
angle = 2 * torch.acos(q[..., qw])
angle = normalize_angle(angle)
sin_theta_expand = sin_theta.unsqueeze(-1)
axis = q[..., qx:] / sin_theta_expand
mask = sin_theta > min_theta
default_axis = torch.zeros_like(axis)
default_axis[..., qw] = 1
angle = torch.where(mask, angle, torch.zeros_like(angle))
mask_expand = mask.unsqueeze(-1)
axis = torch.where(mask_expand, axis, default_axis)
return angle, axis
@torch.jit.script
def quat_from_angle_axis(angle, axis):
theta = (angle / 2).unsqueeze(-1)
xyz = normalize(axis) * theta.sin()
w = theta.cos()
return quat_unit(torch.cat([w, xyz], dim=-1))
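# Illustrative sketch (not part of the original code): a 90-degree rotation about z,
# in the (w, x, y, z) convention used throughout this file, maps the x axis onto the y axis.
def _quat_rotation_example():
    angle = torch.tensor([pi / 2])
    axis = torch.tensor([[0.0, 0.0, 1.0]])
    q = quat_from_angle_axis(angle, axis)               # ~ (0.707, 0, 0, 0.707)
    v = quat_rotate(q, torch.tensor([[1.0, 0.0, 0.0]]))
    return v                                            # ~ [[0, 1, 0]]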
@torch.jit.script
def angle_axis_to_exp_map(angle, axis):
# compute exponential map from axis-angle
angle_expand = angle.unsqueeze(-1)
exp_map = angle_expand * axis
return exp_map
@torch.jit.script
def quat_to_exp_map(q):
eps = 1e-5
qw = q[..., 0, None].clamp(-1+eps, 1-eps)
q_axis = q[..., 1:]
angle = normalize_angle(2 * qw.acos())
axis = q_axis / torch.sqrt(1 - qw ** 2)
return angle * axis
# @torch.jit.script
# def quat_to_exp_map(q):
# # compute exponential map from quaternion
# # q must be normalized
# angle, axis = quat_to_angle_axis(q)
# exp_map = angle_axis_to_exp_map(angle, axis)
# return exp_map
# @torch.jit.script
# def exp_map_to_angle_axis(exp_map):
# min_theta = 1e-5
#
# angle = torch.norm(exp_map, dim=-1)
# angle_exp = torch.unsqueeze(angle, dim=-1)
# axis = exp_map / angle_exp
# angle = normalize_angle(angle)
#
# default_axis = torch.zeros_like(exp_map)
# default_axis[..., -1] = 1
#
# mask = angle > min_theta
# angle = torch.where(mask, angle, torch.zeros_like(angle))
# mask_expand = mask.unsqueeze(-1)
# axis = torch.where(mask_expand, axis, default_axis)
#
# return angle, axis
# @torch.jit.script
# def exp_map_to_quat(exp_map):
# angle, axis = exp_map_to_angle_axis(exp_map)
# q = quat_from_angle_axis(angle, axis)
# return q
@torch.jit.script
def exp_map_to_quat(exp_map):
eps = 1e-12
angle = torch.norm(exp_map, dim=-1, keepdim=True)
axis = exp_map / (angle + eps)
theta = normalize_angle(angle) / 2
xyz = normalize(axis) * theta.sin()
w = theta.cos()
return quat_unit(torch.cat([w, xyz], dim=-1))
@torch.jit.script
def quat_to_tan_norm(q):
# represents a rotation using the tangent and normal vectors
ref_tan = torch.zeros_like(q[..., 0:3])
ref_tan[..., 0] = 1
tan = quat_rotate(q, ref_tan)
ref_norm = torch.zeros_like(q[..., 0:3])
ref_norm[..., -1] = 1
norm = quat_rotate(q, ref_norm)
norm_tan = torch.cat([tan, norm], dim=len(tan.shape) - 1)
return norm_tan
@torch.jit.script
def quat_from_rotation_matrix(m):
m = m.unsqueeze(0)
diag0 = m[..., 0, 0]
diag1 = m[..., 1, 1]
diag2 = m[..., 2, 2]
# Math stuff.
w = (((diag0 + diag1 + diag2 + 1.0) / 4.0).clamp(0.0, None)) ** 0.5
x = (((diag0 - diag1 - diag2 + 1.0) / 4.0).clamp(0.0, None)) ** 0.5
y = (((-diag0 + diag1 - diag2 + 1.0) / 4.0).clamp(0.0, None)) ** 0.5
z = (((-diag0 - diag1 + diag2 + 1.0) / 4.0).clamp(0.0, None)) ** 0.5
# Only modify quaternions where w > x, y, z.
c0 = (w >= x) & (w >= y) & (w >= z)
x[c0] *= (m[..., 2, 1][c0] - m[..., 1, 2][c0]).sign()
y[c0] *= (m[..., 0, 2][c0] - m[..., 2, 0][c0]).sign()
z[c0] *= (m[..., 1, 0][c0] - m[..., 0, 1][c0]).sign()
# Only modify quaternions where x > w, y, z
c1 = (x >= w) & (x >= y) & (x >= z)
w[c1] *= (m[..., 2, 1][c1] - m[..., 1, 2][c1]).sign()
y[c1] *= (m[..., 1, 0][c1] + m[..., 0, 1][c1]).sign()
z[c1] *= (m[..., 0, 2][c1] + m[..., 2, 0][c1]).sign()
# Only modify quaternions where y > w, x, z.
c2 = (y >= w) & (y >= x) & (y >= z)
w[c2] *= (m[..., 0, 2][c2] - m[..., 2, 0][c2]).sign()
x[c2] *= (m[..., 1, 0][c2] + m[..., 0, 1][c2]).sign()
z[c2] *= (m[..., 2, 1][c2] + m[..., 1, 2][c2]).sign()
# Only modify quaternions where z > w, x, y.
c3 = (z >= w) & (z >= x) & (z >= y)
w[c3] *= (m[..., 1, 0][c3] - m[..., 0, 1][c3]).sign()
x[c3] *= (m[..., 2, 0][c3] + m[..., 0, 2][c3]).sign()
y[c3] *= (m[..., 2, 1][c3] + m[..., 1, 2][c3]).sign()
return quat_unit(torch.stack([w, x, y, z], dim=-1)).squeeze(0)
@torch.jit.script
def quat_from_dir(v):
u = torch.zeros_like(v)
u[..., 2] = 1
xyz = torch.cross(u, v, dim=-1)
w = torch.sqrt((u ** 2).sum(-1) * (v ** 2).sum(-1)) + (u * v).sum(-1)
q = quat_unit(torch.cat([w.unsqueeze(-1), xyz], dim=-1))
q[q.abs().sum(-1) < 1e-6, [1]] = 1
return q
@torch.jit.script
def exp_map_mul(e0, e1):
shape = e0.shape[:-1] + (-1,)
q0 = exp_map_to_quat(e0.reshape(-1, 3))
q1 = exp_map_to_quat(e1.reshape(-1, 3))
return quat_to_exp_map(quat_mul(q0, q1)).view(shape)
@torch.jit.script
def exp_map_div(e0, e1):
shape = e0.shape[:-1] + (-1,)
q0 = exp_map_to_quat(e0.reshape(-1, 3))
q1 = exp_map_to_quat(e1.reshape(-1, 3))
return quat_to_exp_map(quat_div(q0, q1)).view(shape)
@torch.jit.script
def exp_map_diff_rad(e0, e1):
return quat_diff_rad(exp_map_to_quat(e0), exp_map_to_quat(e1))
@torch.jit.script
def lerp(p0, p1, t):
return (1 - t) * p0 + t * p1
# @torch.jit.script
def slerp(q0, q1, t):
qw, qx, qy, qz = 0, 1, 2, 3
cos_half_theta = q0[..., qw] * q1[..., qw] \
+ q0[..., qx] * q1[..., qx] \
+ q0[..., qy] * q1[..., qy] \
+ q0[..., qz] * q1[..., qz]
neg_mask = cos_half_theta < 0
q1 = q1.clone()
q1[neg_mask] = -q1[neg_mask]
cos_half_theta = torch.abs(cos_half_theta)
cos_half_theta = torch.unsqueeze(cos_half_theta, dim=-1)
half_theta = torch.acos(cos_half_theta)
sin_half_theta = torch.sqrt(1.0 - cos_half_theta * cos_half_theta)
ratioA = torch.sin((1 - t) * half_theta) / sin_half_theta
ratioB = torch.sin(t * half_theta) / sin_half_theta
new_q_w = ratioA * q0[..., qw:qw + 1] + ratioB * q1[..., qw:qw + 1]
new_q_x = ratioA * q0[..., qx:qx + 1] + ratioB * q1[..., qx:qx + 1]
new_q_y = ratioA * q0[..., qy:qy + 1] + ratioB * q1[..., qy:qy + 1]
new_q_z = ratioA * q0[..., qz:qz + 1] + ratioB * q1[..., qz:qz + 1]
cat_dim = len(new_q_w.shape) - 1
new_q = torch.cat([new_q_w, new_q_x, new_q_y, new_q_z], dim=cat_dim)
new_q = torch.where(torch.abs(sin_half_theta) < 0.001, 0.5 * q0 + 0.5 * q1, new_q)
new_q = torch.where(torch.abs(cos_half_theta) >= 1, q0, new_q)
return new_q
@torch.jit.script
def calc_heading(q):
# calculate heading direction from quaternion
# the heading is the direction on the xy plane
# q must be normalized
ref_dir = torch.zeros_like(q[..., 0:3])
ref_dir[..., 0] = 1
rot_dir = quat_rotate(q, ref_dir)
heading = torch.atan2(rot_dir[..., 1], rot_dir[..., 0])
return heading
@torch.jit.script
def calc_heading_quat(q):
# calculate heading rotation from quaternion
# the heading is the direction on the xy plane
# q must be normalized
heading = calc_heading(q)
axis = torch.zeros_like(q[..., 0:3])
axis[..., 2] = 1
heading_q = quat_from_angle_axis(heading, axis)
return heading_q
@torch.jit.script
def calc_heading_quat_inv(q):
# calculate heading rotation from quaternion
# the heading is the direction on the xy plane
# q must be normalized
heading = calc_heading(q)
axis = torch.zeros_like(q[..., 0:3])
axis[..., 2] = 1
heading_q = quat_from_angle_axis(-heading, axis)
return heading_q
@torch.jit.script
def normalize_pos(pos):
z = torch.zeros_like(pos)
z[..., 2] = 1
return z * pos.norm(p=2, dim=-1, keepdim=True)
def draw_exp_map(e):
draw_quaternion(exp_map_to_quat(e))
def draw_quaternion(q):
v = torch.Tensor([0, 0, 1]).repeat(len(q), 1)
v = quat_rotate(q, v)
fig = px.scatter_3d(x=v[:, 0], y=v[:, 1], z=v[:, 2])
fig.update_layout(
scene=dict(
xaxis=dict(range=[-1, 1]),
yaxis=dict(range=[-1, 1]),
zaxis=dict(range=[-1, 1]),
)
)
fig.update_scenes(aspectmode='cube')
fig_add_sphere(fig)
fig.show()
def random_quaternion(size):
return exp_map_to_quat((torch.rand([size, 3]) - 0.5) * 2 * torch.pi)
def fig_add_sphere(fig):
theta = np.linspace(0, 2 * pi, 120)
phi = np.linspace(0, pi, 60)
u, v = np.meshgrid(theta, phi)
xs = cos(u) * sin(v)
ys = sin(u) * sin(v)
zs = cos(v)
x, y, z = [], [], []
for t in [theta[10 * k] for k in range(12)]: # meridians:
x.extend(list(cos(t) * sin(phi)) + [None]) # None is inserted to mark the end of a meridian line
y.extend(list(sin(t) * sin(phi)) + [None])
z.extend(list(cos(phi)) + [None])
for s in [phi[6 * k] for k in range(10)]: # parallels
x.extend(list(cos(theta) * sin(s)) + [None]) # None is inserted to mark the end of a parallel line
y.extend(list(sin(theta) * sin(s)) + [None])
z.extend([cos(s)] * 120 + [None])
fig.add_surface(x=xs, y=ys, z=zs,
colorscale=[[0, '#ffffff'], [1, '#ffffff']],
showscale=False, opacity=0.5) # or opacity=1
fig.add_scatter3d(x=x, y=y, z=z, mode='lines', line_width=3, line_color='rgb(10,10,10)')
def _test_exp_map_diff_rad_grad():
n = 10000
print('testing...')
for _ in range(1000):
x = Variable(torch.rand([n, 3]) * 1000, requires_grad=True)
y = exp_map_diff_rad(x, torch.rand([n, 3])).mean()
y.backward()
if x.grad.isnan().any():
print(y)
print('finish')
def _test_exp_map_to_quat_grad():
n = 10000
print('testing...')
for _ in range(1):
x = Variable(torch.rand([n, 3]) * 1000, requires_grad=True)
y = exp_map_to_quat(x).mean()
y.backward()
print(x.grad)
# if x.grad.isnan().any():
# print(y)
print('finish')
def _test_quat_to_exp_map_grad():
n = 10000
print('testing...')
for _ in range(1):
x = Variable(torch.rand([n, 3]), requires_grad=True)
y = exp_map_to_quat(x)
y = quat_to_exp_map(y)
y.mean().backward()
print((y - x).sum())
print(x.grad)
# if x.grad.isnan().any():
# print(y)
print('finish')
def _test_slerp():
n = 15
q0 = random_quaternion(1).repeat(n, 1)
q1 = random_quaternion(1).repeat(n, 1)
t = torch.arange(n).float() / n
q = slerp(q0, q1, t.unsqueeze(-1))
draw_quaternion(q)
if __name__ == '__main__':
_test_quat_to_exp_map_grad()
| 15,284 | Python | 26.84153 | 107 | 0.537817 |
BeanSamuel/Exchange-Rate-Prediction-RL/utils/control_panel.py | import omni.ui as ui
def _preproc_kwargs(kwargs):
for k in kwargs.keys():
if k in ['width', 'height']:
kwargs[k] = ui.Length(kwargs[k])
return kwargs
class ControlPanel:
def __init__(self, name):
self._window = ui.Window(name, auto_resize=True)
self._components = dict()
def __getitem__(self, name):
if isinstance(name, (list, tuple)):
return [self.__getitem__(x) for x in name]
item = self._components.get(name)
if isinstance(item, ui.FloatSlider):
return item.model.get_value_as_float()
elif isinstance(item, ui.CheckBox):
return item.model.get_value_as_bool()
else:
raise IndexError
def __setitem__(self, key, value):
if isinstance(key, (list, tuple)):
for k, v in zip(key, value):
self.__setitem__(k, v)
return
item = self._components.get(key)
if isinstance(item, ui.FloatField):
item.model.set_value(value)
else:
raise IndexError
def add_slider(self, name, **kwargs):
self._components[name] = lambda: ui.FloatSlider(**_preproc_kwargs(kwargs))
def add_float(self, name, **kwargs):
self._components[name] = lambda: ui.FloatField(**_preproc_kwargs(kwargs))
def add_check_box(self, name, **kwargs):
self._components[name] = lambda: ui.CheckBox(**_preproc_kwargs(kwargs))
def build(self):
with self._window.frame:
with ui.VStack(width=150):
for k, v in self._components.items():
ui.Label(k)
self._components[k] = v()
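# Illustrative sketch (not part of the original code): components are declared first,
# built once, then read by name. This only runs inside an Omniverse app where omni.ui
# is available; the widget names below are made up for the example.
def _control_panel_example():
    panel = ControlPanel('Debug')
    panel.add_slider('speed', width=150)
    panel.add_check_box('paused')
    panel.build()
    return panel['speed'], panel['paused']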
| 1,685 | Python | 29.654545 | 82 | 0.558457 |
BeanSamuel/Exchange-Rate-Prediction-RL/utils/task_util.py | from env import ForexEnv
from learning.ppo_agent import PPOAgent
from learning.pg_agent import PGAgent
def get_env_agent(config):
env_map = {
'Noob': ForexEnv,
}
agent_map = {
'PPOAgent': PPOAgent,
'PGAgent': PGAgent
}
env = env_map[config['task']['name']](config)
agent = agent_map[config['train']['name']](params=config['train']['params'], env=env)
return env, agent
| 426 | Python | 20.349999 | 89 | 0.617371 |
BeanSamuel/Exchange-Rate-Prediction-RL/utils/domain_randomization/randomize.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy
import omni
import omni.replicator.core as rep
import omni.replicator.isaac as dr
import numpy as np
import torch
from omni.isaac.core.prims import RigidPrimView
class Randomizer():
def __init__(self, sim_config):
self._cfg = sim_config.task_config
self._config = sim_config.config
self.randomize = False
dr_config = self._cfg.get("domain_randomization", None)
self.distributions = dict()
self.active_domain_randomizations = dict()
self._observations_dr_params = None
self._actions_dr_params = None
if dr_config is not None:
randomize = dr_config.get("randomize", False)
randomization_params = dr_config.get("randomization_params", None)
if randomize and randomization_params is not None:
self.randomize = True
self.min_frequency = dr_config.get("min_frequency", 1)
def apply_on_startup_domain_randomization(self, task):
if self.randomize:
torch.manual_seed(self._config["seed"])
randomization_params = self._cfg["domain_randomization"]["randomization_params"]
for opt in randomization_params.keys():
if opt == "rigid_prim_views":
if randomization_params["rigid_prim_views"] is not None:
for view_name in randomization_params["rigid_prim_views"].keys():
if randomization_params["rigid_prim_views"][view_name] is not None:
for attribute, params in randomization_params["rigid_prim_views"][view_name].items():
params = randomization_params["rigid_prim_views"][view_name][attribute]
if attribute in ["scale", "mass", "density"] and params is not None:
if "on_startup" in params.keys():
if not set(('operation','distribution', 'distribution_parameters')).issubset(params["on_startup"]):
raise ValueError(f"Please ensure the following randomization parameters for {view_name} {attribute} " + \
"on_startup are provided: operation, distribution, distribution_parameters.")
view = task._env._world.scene._scene_registry.rigid_prim_views[view_name]
if attribute == "scale":
self.randomize_scale_on_startup(
view=view,
distribution=params["on_startup"]["distribution"],
distribution_parameters=params["on_startup"]["distribution_parameters"],
operation=params["on_startup"]["operation"],
sync_dim_noise=True,
)
elif attribute == "mass":
self.randomize_mass_on_startup(
view=view,
distribution=params["on_startup"]["distribution"],
distribution_parameters=params["on_startup"]["distribution_parameters"],
operation=params["on_startup"]["operation"],
)
elif attribute == "density":
self.randomize_density_on_startup(
view=view,
distribution=params["on_startup"]["distribution"],
distribution_parameters=params["on_startup"]["distribution_parameters"],
operation=params["on_startup"]["operation"],
)
if opt == "articulation_views":
if randomization_params["articulation_views"] is not None:
for view_name in randomization_params["articulation_views"].keys():
if randomization_params["articulation_views"][view_name] is not None:
for attribute, params in randomization_params["articulation_views"][view_name].items():
params = randomization_params["articulation_views"][view_name][attribute]
if attribute in ["scale"] and params is not None:
if "on_startup" in params.keys():
if not set(('operation','distribution', 'distribution_parameters')).issubset(params["on_startup"]):
raise ValueError(f"Please ensure the following randomization parameters for {view_name} {attribute} " + \
"on_startup are provided: operation, distribution, distribution_parameters.")
view = task._env._world.scene._scene_registry.articulated_views[view_name]
if attribute == "scale":
self.randomize_scale_on_startup(
view=view,
distribution=params["on_startup"]["distribution"],
distribution_parameters=params["on_startup"]["distribution_parameters"],
operation=params["on_startup"]["operation"],
sync_dim_noise=True
)
else:
dr_config = self._cfg.get("domain_randomization", None)
if dr_config is None:
raise ValueError("No domain randomization parameters are specified in the task yaml config file")
randomize = dr_config.get("randomize", False)
randomization_params = dr_config.get("randomization_params", None)
if randomize == False or randomization_params is None:
print("On Startup Domain randomization will not be applied.")
def set_up_domain_randomization(self, task):
if self.randomize:
randomization_params = self._cfg["domain_randomization"]["randomization_params"]
rep.set_global_seed(self._config["seed"])
with dr.trigger.on_rl_frame(num_envs=self._cfg["env"]["numEnvs"]):
for opt in randomization_params.keys():
if opt == "observations":
self._set_up_observations_randomization(task)
elif opt == "actions":
self._set_up_actions_randomization(task)
elif opt == "simulation":
if randomization_params["simulation"] is not None:
self.distributions["simulation"] = dict()
dr.physics_view.register_simulation_context(task._env._world)
for attribute, params in randomization_params["simulation"].items():
self._set_up_simulation_randomization(attribute, params)
elif opt == "rigid_prim_views":
if randomization_params["rigid_prim_views"] is not None:
self.distributions["rigid_prim_views"] = dict()
for view_name in randomization_params["rigid_prim_views"].keys():
if randomization_params["rigid_prim_views"][view_name] is not None:
self.distributions["rigid_prim_views"][view_name] = dict()
dr.physics_view.register_rigid_prim_view(
rigid_prim_view=task._env._world.scene._scene_registry.rigid_prim_views[view_name],
)
for attribute, params in randomization_params["rigid_prim_views"][view_name].items():
if attribute not in ["scale", "density"]:
self._set_up_rigid_prim_view_randomization(view_name, attribute, params)
elif opt == "articulation_views":
if randomization_params["articulation_views"] is not None:
self.distributions["articulation_views"] = dict()
for view_name in randomization_params["articulation_views"].keys():
if randomization_params["articulation_views"][view_name] is not None:
self.distributions["articulation_views"][view_name] = dict()
dr.physics_view.register_articulation_view(
articulation_view=task._env._world.scene._scene_registry.articulated_views[view_name],
)
for attribute, params in randomization_params["articulation_views"][view_name].items():
if attribute not in ["scale"]:
self._set_up_articulation_view_randomization(view_name, attribute, params)
rep.orchestrator.run()
else:
dr_config = self._cfg.get("domain_randomization", None)
if dr_config is None:
raise ValueError("No domain randomization parameters are specified in the task yaml config file")
randomize = dr_config.get("randomize", False)
randomization_params = dr_config.get("randomization_params", None)
if randomize == False or randomization_params is None:
print("Domain randomization will not be applied.")
def _set_up_observations_randomization(self, task):
task.randomize_observations = True
self._observations_dr_params = self._cfg["domain_randomization"]["randomization_params"]["observations"]
if self._observations_dr_params is None:
raise ValueError(f"Observations randomization parameters are not provided.")
if "on_reset" in self._observations_dr_params.keys():
if not set(('operation','distribution', 'distribution_parameters')).issubset(self._observations_dr_params["on_reset"].keys()):
raise ValueError(f"Please ensure the following observations on_reset randomization parameters are provided: " + \
"operation, distribution, distribution_parameters.")
self.active_domain_randomizations[("observations", "on_reset")] = np.array(self._observations_dr_params["on_reset"]["distribution_parameters"])
if "on_interval" in self._observations_dr_params.keys():
if not set(('frequency_interval', 'operation','distribution', 'distribution_parameters')).issubset(self._observations_dr_params["on_interval"].keys()):
raise ValueError(f"Please ensure the following observations on_interval randomization parameters are provided: " + \
"frequency_interval, operation, distribution, distribution_parameters.")
self.active_domain_randomizations[("observations", "on_interval")] = np.array(self._observations_dr_params["on_interval"]["distribution_parameters"])
self._observations_counter_buffer = torch.zeros((self._cfg["env"]["numEnvs"]), dtype=torch.int, device=self._config["rl_device"])
self._observations_correlated_noise = torch.zeros((self._cfg["env"]["numEnvs"], task.num_observations), device=self._config["rl_device"])
def _set_up_actions_randomization(self, task):
task.randomize_actions = True
self._actions_dr_params = self._cfg["domain_randomization"]["randomization_params"]["actions"]
if self._actions_dr_params is None:
raise ValueError(f"Actions randomization parameters are not provided.")
if "on_reset" in self._actions_dr_params.keys():
if not set(('operation','distribution', 'distribution_parameters')).issubset(self._actions_dr_params["on_reset"].keys()):
raise ValueError(f"Please ensure the following actions on_reset randomization parameters are provided: " + \
"operation, distribution, distribution_parameters.")
self.active_domain_randomizations[("actions", "on_reset")] = np.array(self._actions_dr_params["on_reset"]["distribution_parameters"])
if "on_interval" in self._actions_dr_params.keys():
if not set(('frequency_interval', 'operation','distribution', 'distribution_parameters')).issubset(self._actions_dr_params["on_interval"].keys()):
raise ValueError(f"Please ensure the following actions on_interval randomization parameters are provided: " + \
"frequency_interval, operation, distribution, distribution_parameters.")
self.active_domain_randomizations[("actions", "on_interval")] = np.array(self._actions_dr_params["on_interval"]["distribution_parameters"])
self._actions_counter_buffer = torch.zeros((self._cfg["env"]["numEnvs"]), dtype=torch.int, device=self._config["rl_device"])
self._actions_correlated_noise = torch.zeros((self._cfg["env"]["numEnvs"], task.num_actions), device=self._config["rl_device"])
def apply_observations_randomization(self, observations, reset_buf):
env_ids = reset_buf.nonzero(as_tuple=False).squeeze(-1)
self._observations_counter_buffer[env_ids] = 0
self._observations_counter_buffer += 1
if "on_reset" in self._observations_dr_params.keys():
observations[:] = self._apply_correlated_noise(
buffer_type="observations",
buffer=observations,
reset_ids=env_ids,
operation=self._observations_dr_params["on_reset"]["operation"],
distribution=self._observations_dr_params["on_reset"]["distribution"],
distribution_parameters=self._observations_dr_params["on_reset"]["distribution_parameters"],
)
if "on_interval" in self._observations_dr_params.keys():
randomize_ids = (self._observations_counter_buffer >= self._observations_dr_params["on_interval"]["frequency_interval"]).nonzero(as_tuple=False).squeeze(-1)
self._observations_counter_buffer[randomize_ids] = 0
observations[:] = self._apply_uncorrelated_noise(
buffer=observations,
randomize_ids=randomize_ids,
operation=self._observations_dr_params["on_interval"]["operation"],
distribution=self._observations_dr_params["on_interval"]["distribution"],
distribution_parameters=self._observations_dr_params["on_interval"]["distribution_parameters"],
)
return observations
def apply_actions_randomization(self, actions, reset_buf):
env_ids = reset_buf.nonzero(as_tuple=False).squeeze(-1)
self._actions_counter_buffer[env_ids] = 0
self._actions_counter_buffer += 1
if "on_reset" in self._actions_dr_params.keys():
actions[:] = self._apply_correlated_noise(
buffer_type="actions",
buffer=actions,
reset_ids=env_ids,
operation=self._actions_dr_params["on_reset"]["operation"],
distribution=self._actions_dr_params["on_reset"]["distribution"],
distribution_parameters=self._actions_dr_params["on_reset"]["distribution_parameters"],
)
if "on_interval" in self._actions_dr_params.keys():
randomize_ids = (self._actions_counter_buffer >= self._actions_dr_params["on_interval"]["frequency_interval"]).nonzero(as_tuple=False).squeeze(-1)
self._actions_counter_buffer[randomize_ids] = 0
actions[:] = self._apply_uncorrelated_noise(
buffer=actions,
randomize_ids=randomize_ids,
operation=self._actions_dr_params["on_interval"]["operation"],
distribution=self._actions_dr_params["on_interval"]["distribution"],
distribution_parameters=self._actions_dr_params["on_interval"]["distribution_parameters"],
)
return actions
def _apply_uncorrelated_noise(self, buffer, randomize_ids, operation, distribution, distribution_parameters):
if distribution == "gaussian" or distribution == "normal":
noise = torch.normal(mean=distribution_parameters[0], std=distribution_parameters[1], size=(len(randomize_ids), buffer.shape[1]), device=self._config["rl_device"])
elif distribution == "uniform":
noise = (distribution_parameters[1] - distribution_parameters[0]) * torch.rand((len(randomize_ids), buffer.shape[1]), device=self._config["rl_device"]) + distribution_parameters[0]
elif distribution == "loguniform" or distribution == "log_uniform":
noise = torch.exp((np.log(distribution_parameters[1]) - np.log(distribution_parameters[0])) * torch.rand((len(randomize_ids), buffer.shape[1]), device=self._config["rl_device"]) + np.log(distribution_parameters[0]))
else:
print(f"The specified {distribution} distribution is not supported.")
if operation == "additive":
buffer[randomize_ids] += noise
elif operation == "scaling":
buffer[randomize_ids] *= noise
else:
print(f"The specified {operation} operation type is not supported.")
return buffer
def _apply_correlated_noise(self, buffer_type, buffer, reset_ids, operation, distribution, distribution_parameters):
if buffer_type == "observations":
correlated_noise_buffer = self._observations_correlated_noise
elif buffer_type == "actions":
correlated_noise_buffer = self._actions_correlated_noise
if len(reset_ids) > 0:
if distribution == "gaussian" or distribution == "normal":
correlated_noise_buffer[reset_ids] = torch.normal(mean=distribution_parameters[0], std=distribution_parameters[1], size=(len(reset_ids), buffer.shape[1]), device=self._config["rl_device"])
elif distribution == "uniform":
correlated_noise_buffer[reset_ids] = (distribution_parameters[1] - distribution_parameters[0]) * torch.rand((len(reset_ids), buffer.shape[1]), device=self._config["rl_device"]) + distribution_parameters[0]
elif distribution == "loguniform" or distribution == "log_uniform":
correlated_noise_buffer[reset_ids] = torch.exp((np.log(distribution_parameters[1]) - np.log(distribution_parameters[0])) * torch.rand((len(reset_ids), buffer.shape[1]), device=self._config["rl_device"]) + np.log(distribution_parameters[0]))
else:
print(f"The specified {distribution} distribution is not supported.")
if operation == "additive":
buffer += correlated_noise_buffer
elif operation == "scaling":
buffer *= correlated_noise_buffer
else:
print(f"The specified {operation} operation type is not supported.")
return buffer
def _set_up_simulation_randomization(self, attribute, params):
if params is None:
raise ValueError(f"Randomization parameters for simulation {attribute} is not provided.")
if attribute in dr.SIMULATION_CONTEXT_ATTRIBUTES:
self.distributions["simulation"][attribute] = dict()
if "on_reset" in params.keys():
if not set(('operation','distribution', 'distribution_parameters')).issubset(params["on_reset"]):
raise ValueError(f"Please ensure the following randomization parameters for simulation {attribute} on_reset are provided: " + \
"operation, distribution, distribution_parameters.")
self.active_domain_randomizations[("simulation", attribute, "on_reset")] = np.array(params["on_reset"]["distribution_parameters"])
kwargs = {"operation": params["on_reset"]["operation"]}
self.distributions["simulation"][attribute]["on_reset"] = self._generate_distribution(
dimension=dr.physics_view._simulation_context_initial_values[attribute].shape[0],
view_name="simulation",
attribute=attribute,
params=params["on_reset"],
)
kwargs[attribute] = self.distributions["simulation"][attribute]["on_reset"]
with dr.gate.on_env_reset():
dr.physics_view.randomize_simulation_context(**kwargs)
if "on_interval" in params.keys():
if not set(('frequency_interval', 'operation','distribution', 'distribution_parameters')).issubset(params["on_interval"]):
raise ValueError(f"Please ensure the following randomization parameters for simulation {attribute} on_interval are provided: " + \
"frequency_interval, operation, distribution, distribution_parameters.")
self.active_domain_randomizations[("simulation", attribute, "on_interval")] = np.array(params["on_interval"]["distribution_parameters"])
kwargs = {"operation": params["on_interval"]["operation"]}
self.distributions["simulation"][attribute]["on_interval"] = self._generate_distribution(
dimension=dr.physics_view._simulation_context_initial_values[attribute].shape[0],
view_name="simulation",
attribute=attribute,
params=params["on_interval"],
)
kwargs[attribute] = self.distributions["simulation"][attribute]["on_interval"]
with dr.gate.on_interval(interval=params["on_interval"]["frequency_interval"]):
dr.physics_view.randomize_simulation_context(**kwargs)
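    # Hedged sketch of the task-config block this method consumes; the attribute name
    # and parameters are illustrative, not copied from a shipped task:
    #
    #   domain_randomization:
    #     randomization_params:
    #       simulation:
    #         gravity:
    #           on_reset:
    #             operation: "additive"
    #             distribution: "gaussian"
    #             distribution_parameters: [[0.0, 0.0, 0.0], [0.0, 0.0, 0.4]]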
def _set_up_rigid_prim_view_randomization(self, view_name, attribute, params):
if params is None:
raise ValueError(f"Randomization parameters for rigid prim view {view_name} {attribute} is not provided.")
if attribute in dr.RIGID_PRIM_ATTRIBUTES:
self.distributions["rigid_prim_views"][view_name][attribute] = dict()
if "on_reset" in params.keys():
if not set(('operation','distribution', 'distribution_parameters')).issubset(params["on_reset"]):
raise ValueError(f"Please ensure the following randomization parameters for {view_name} {attribute} on_reset are provided: " + \
"operation, distribution, distribution_parameters.")
self.active_domain_randomizations[("rigid_prim_views", view_name, attribute, "on_reset")] = np.array(params["on_reset"]["distribution_parameters"])
kwargs = {"view_name": view_name, "operation": params["on_reset"]["operation"]}
if attribute == "material_properties" and "num_buckets" in params["on_reset"].keys():
kwargs["num_buckets"] = params["on_reset"]["num_buckets"]
self.distributions["rigid_prim_views"][view_name][attribute]["on_reset"] = self._generate_distribution(
dimension=dr.physics_view._rigid_prim_views_initial_values[view_name][attribute].shape[1],
view_name=view_name,
attribute=attribute,
params=params["on_reset"],
)
kwargs[attribute] = self.distributions["rigid_prim_views"][view_name][attribute]["on_reset"]
with dr.gate.on_env_reset():
dr.physics_view.randomize_rigid_prim_view(**kwargs)
if "on_interval" in params.keys():
if not set(('frequency_interval', 'operation','distribution', 'distribution_parameters')).issubset(params["on_interval"]):
raise ValueError(f"Please ensure the following randomization parameters for {view_name} {attribute} on_interval are provided: " + \
"frequency_interval, operation, distribution, distribution_parameters.")
self.active_domain_randomizations[("rigid_prim_views", view_name, attribute, "on_interval")] = np.array(params["on_interval"]["distribution_parameters"])
kwargs = {"view_name": view_name, "operation": params["on_interval"]["operation"]}
if attribute == "material_properties" and "num_buckets" in params["on_interval"].keys():
kwargs["num_buckets"] = params["on_interval"]["num_buckets"]
self.distributions["rigid_prim_views"][view_name][attribute]["on_interval"] = self._generate_distribution(
dimension=dr.physics_view._rigid_prim_views_initial_values[view_name][attribute].shape[1],
view_name=view_name,
attribute=attribute,
params=params["on_interval"],
)
kwargs[attribute] = self.distributions["rigid_prim_views"][view_name][attribute]["on_interval"]
with dr.gate.on_interval(interval=params["on_interval"]["frequency_interval"]):
dr.physics_view.randomize_rigid_prim_view(**kwargs)
else:
raise ValueError(f"The attribute {attribute} for {view_name} is invalid for domain randomization.")
def _set_up_articulation_view_randomization(self, view_name, attribute, params):
if params is None:
raise ValueError(f"Randomization parameters for articulation view {view_name} {attribute} is not provided.")
if attribute in dr.ARTICULATION_ATTRIBUTES:
self.distributions["articulation_views"][view_name][attribute] = dict()
if "on_reset" in params.keys():
if not set(('operation','distribution', 'distribution_parameters')).issubset(params["on_reset"]):
raise ValueError(f"Please ensure the following randomization parameters for {view_name} {attribute} on_reset are provided: " + \
"operation, distribution, distribution_parameters.")
self.active_domain_randomizations[("articulation_views", view_name, attribute, "on_reset")] = np.array(params["on_reset"]["distribution_parameters"])
kwargs = {"view_name": view_name, "operation": params["on_reset"]["operation"]}
if attribute == "material_properties" and "num_buckets" in params["on_reset"].keys():
kwargs["num_buckets"] = params["on_reset"]["num_buckets"]
self.distributions["articulation_views"][view_name][attribute]["on_reset"] = self._generate_distribution(
dimension=dr.physics_view._articulation_views_initial_values[view_name][attribute].shape[1],
view_name=view_name,
attribute=attribute,
params=params["on_reset"],
)
kwargs[attribute] = self.distributions["articulation_views"][view_name][attribute]["on_reset"]
with dr.gate.on_env_reset():
dr.physics_view.randomize_articulation_view(**kwargs)
if "on_interval" in params.keys():
if not set(('frequency_interval', 'operation','distribution', 'distribution_parameters')).issubset(params["on_interval"]):
raise ValueError(f"Please ensure the following randomization parameters for {view_name} {attribute} on_interval are provided: " + \
"frequency_interval, operation, distribution, distribution_parameters.")
self.active_domain_randomizations[("articulation_views", view_name, attribute, "on_interval")] = np.array(params["on_interval"]["distribution_parameters"])
kwargs = {"view_name": view_name, "operation": params["on_interval"]["operation"]}
if attribute == "material_properties" and "num_buckets" in params["on_interval"].keys():
kwargs["num_buckets"] = params["on_interval"]["num_buckets"]
self.distributions["articulation_views"][view_name][attribute]["on_interval"] = self._generate_distribution(
dimension=dr.physics_view._articulation_views_initial_values[view_name][attribute].shape[1],
view_name=view_name,
attribute=attribute,
params=params["on_interval"],
)
kwargs[attribute] = self.distributions["articulation_views"][view_name][attribute]["on_interval"]
with dr.gate.on_interval(interval=params["on_interval"]["frequency_interval"]):
dr.physics_view.randomize_articulation_view(**kwargs)
else:
raise ValueError(f"The attribute {attribute} for {view_name} is invalid for domain randomization.")
def _generate_distribution(self, view_name, attribute, dimension, params):
dist_params = self._sanitize_distribution_parameters(attribute, dimension, params["distribution_parameters"])
if params["distribution"] == "uniform":
return rep.distribution.uniform(tuple(dist_params[0]), tuple(dist_params[1]))
elif params["distribution"] == "gaussian" or params["distribution"] == "normal":
return rep.distribution.normal(tuple(dist_params[0]), tuple(dist_params[1]))
elif params["distribution"] == "loguniform" or params["distribution"] == "log_uniform":
return rep.distribution.log_uniform(tuple(dist_params[0]), tuple(dist_params[1]))
else:
raise ValueError(f"The provided distribution for {view_name} {attribute} is not supported. "
+ "Options: uniform, gaussian/normal, loguniform/log_uniform"
)
def _sanitize_distribution_parameters(self, attribute, dimension, params):
distribution_parameters = np.array(params)
if distribution_parameters.shape == (2,):
# if the user does not provide a set of parameters for each dimension
dist_params = [[distribution_parameters[0]]*dimension, [distribution_parameters[1]]*dimension]
elif distribution_parameters.shape == (2, dimension):
# if the user provides a set of parameters for each dimension in the format [[...], [...]]
dist_params = distribution_parameters.tolist()
elif attribute in ["material_properties", "body_inertias"] and distribution_parameters.shape == (2, 3):
# if the user only provides the parameters for one body in the articulation, assume the same parameters for all other links
dist_params = [[distribution_parameters[0]] * (dimension // 3), [distribution_parameters[1]] * (dimension // 3)]
else:
raise ValueError(f"The provided distribution_parameters for {view_name} {attribute} is invalid due to incorrect dimensions.")
return dist_params
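    # Hedged examples of the three accepted parameter layouts (values are illustrative):
    #   [0.5, 1.5]                            -> one (lower, upper) pair broadcast to every dimension
    #   [[0.5, 0.4], [1.5, 1.4]]              -> explicit per-dimension pairs when dimension == 2
    #   shape (2, 3) for material_properties  -> one body's parameters tiled across all bodies/shapes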
def set_dr_distribution_parameters(self, distribution_parameters, *distribution_path):
if distribution_path not in self.active_domain_randomizations.keys():
raise ValueError(f"Cannot find a valid domain randomization distribution using the path {distribution_path}.")
if distribution_path[0] == "observations":
if len(distribution_parameters) == 2:
self._observations_dr_params[distribution_path[1]]["distribution_parameters"] = distribution_parameters
else:
raise ValueError(f"Please provide distribution_parameters for observations {distribution_path[1]} " +
"in the form of [dist_param_1, dist_param_2]")
elif distribution_path[0] == "actions":
if len(distribution_parameters) == 2:
self._actions_dr_params[distribution_path[1]]["distribution_parameters"] = distribution_parameters
else:
raise ValueError(f"Please provide distribution_parameters for actions {distribution_path[1]} " +
"in the form of [dist_param_1, dist_param_2]")
else:
replicator_distribution = self.distributions[distribution_path[0]][distribution_path[1]][distribution_path[2]]
if distribution_path[0] == "rigid_prim_views" or distribution_path[0] == "articulation_views":
replicator_distribution = replicator_distribution[distribution_path[3]]
if replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleUniform" \
or replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleLogUniform":
dimension = len(dr.utils.get_distribution_params(replicator_distribution, ["lower"])[0])
dist_params = self._sanitize_distribution_parameters(distribution_path[-2], dimension, distribution_parameters)
dr.utils.set_distribution_params(replicator_distribution, {"lower": dist_params[0], "upper": dist_params[1]})
elif replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleNormal":
dimension = len(dr.utils.get_distribution_params(replicator_distribution, ["mean"])[0])
dist_params = self._sanitize_distribution_parameters(distribution_path[-2], dimension, distribution_parameters)
dr.utils.set_distribution_params(replicator_distribution, {"mean": dist_params[0], "std": dist_params[1]})
def get_dr_distribution_parameters(self, *distribution_path):
if distribution_path not in self.active_domain_randomizations.keys():
raise ValueError(f"Cannot find a valid domain randomization distribution using the path {distribution_path}.")
if distribution_path[0] == "observations":
return self._observations_dr_params[distribution_path[1]]["distribution_parameters"]
elif distribution_path[0] == "actions":
return self._actions_dr_params[distribution_path[1]]["distribution_parameters"]
else:
replicator_distribution = self.distributions[distribution_path[0]][distribution_path[1]][distribution_path[2]]
if distribution_path[0] == "rigid_prim_views" or distribution_path[0] == "articulation_views":
replicator_distribution = replicator_distribution[distribution_path[3]]
if replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleUniform" \
or replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleLogUniform":
return dr.utils.get_distribution_params(replicator_distribution, ["lower", "upper"])
elif replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleNormal":
return dr.utils.get_distribution_params(replicator_distribution, ["mean", "std"])
def get_initial_dr_distribution_parameters(self, *distribution_path):
if distribution_path not in self.active_domain_randomizations.keys():
raise ValueError(f"Cannot find a valid domain randomization distribution using the path {distribution_path}.")
return self.active_domain_randomizations[distribution_path].copy()
def _generate_noise(self, distribution, distribution_parameters, size, device):
if distribution == "gaussian" or distribution == "normal":
noise = torch.normal(mean=distribution_parameters[0], std=distribution_parameters[1], size=size, device=device)
elif distribution == "uniform":
noise = (distribution_parameters[1] - distribution_parameters[0]) * torch.rand(size, device=device) + distribution_parameters[0]
elif distribution == "loguniform" or distribution == "log_uniform":
noise = torch.exp((np.log(distribution_parameters[1]) - np.log(distribution_parameters[0])) * torch.rand(size, device=device) + np.log(distribution_parameters[0]))
else:
print(f"The specified {distribution} distribution is not supported.")
return noise
def randomize_scale_on_startup(self, view, distribution, distribution_parameters, operation, sync_dim_noise=True):
scales = view.get_local_scales()
if sync_dim_noise:
dist_params = np.asarray(self._sanitize_distribution_parameters(attribute="scale", dimension=1, params=distribution_parameters))
noise = self._generate_noise(distribution, dist_params.squeeze(), (view.count,), view._device).repeat(3,1).T
else:
dist_params = np.asarray(self._sanitize_distribution_parameters(attribute="scale", dimension=3, params=distribution_parameters))
noise = torch.zeros((view.count, 3), device=view._device)
for i in range(3):
noise[:, i] = self._generate_noise(distribution, dist_params[:, i], (view.count,), view._device)
if operation == "additive":
scales += noise
elif operation == "scaling":
scales *= noise
elif operation == "direct":
scales = noise
else:
print(f"The specified {operation} operation type is not supported.")
view.set_local_scales(scales=scales)
def randomize_mass_on_startup(self, view, distribution, distribution_parameters, operation):
if isinstance(view, omni.isaac.core.prims.RigidPrimView) or isinstance(view, RigidPrimView):
masses = view.get_masses()
dist_params = np.asarray(self._sanitize_distribution_parameters(attribute=f"{view.name} mass", dimension=1, params=distribution_parameters))
noise = self._generate_noise(distribution, dist_params.squeeze(), (view.count,), view._device)
set_masses = view.set_masses
if operation == "additive":
masses += noise
elif operation == "scaling":
masses *= noise
elif operation == "direct":
masses = noise
else:
print(f"The specified {operation} operation type is not supported.")
set_masses(masses)
def randomize_density_on_startup(self, view, distribution, distribution_parameters, operation):
if isinstance(view, omni.isaac.core.prims.RigidPrimView) or isinstance(view, RigidPrimView):
densities = view.get_densities()
dist_params = np.asarray(self._sanitize_distribution_parameters(attribute=f"{view.name} density", dimension=1, params=distribution_parameters))
noise = self._generate_noise(distribution, dist_params.squeeze(), (view.count,), view._device)
set_densities = view.set_densities
if operation == "additive":
densities += noise
elif operation == "scaling":
densities *= noise
elif operation == "direct":
densities = noise
else:
print(f"The specified {operation} operation type is not supported.")
set_densities(densities)
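    # Hedged usage sketch for the on-startup helpers above; "object_view" stands in for
    # whatever RigidPrimView the task created, and the numbers are illustrative:
    #   self.randomize_scale_on_startup(
    #       view=object_view, distribution="uniform",
    #       distribution_parameters=[0.9, 1.1], operation="scaling", sync_dim_noise=True,
    #   )
    #   self.randomize_mass_on_startup(
    #       view=object_view, distribution="uniform",
    #       distribution_parameters=[0.8, 1.2], operation="scaling",
    #   )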
| 41,564 | Python | 70.787565 | 256 | 0.602877 |
BeanSamuel/Exchange-Rate-Prediction-RL/utils/rlgames/rlgames_utils.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from rl_games.common import env_configurations, vecenv
from rl_games.common.algo_observer import AlgoObserver
from rl_games.algos_torch import torch_ext
import torch
import numpy as np
from typing import Callable
class RLGPUAlgoObserver(AlgoObserver):
"""Allows us to log stats from the env along with the algorithm running stats. """
def __init__(self):
pass
def after_init(self, algo):
self.algo = algo
self.mean_scores = torch_ext.AverageMeter(1, self.algo.games_to_track).to(self.algo.device)
self.ep_infos = []
self.direct_info = {}
self.writer = self.algo.writer
def process_infos(self, infos, done_indices):
assert isinstance(infos, dict), "RLGPUAlgoObserver expects dict info"
if isinstance(infos, dict):
if 'episode' in infos:
self.ep_infos.append(infos['episode'])
if len(infos) > 0 and isinstance(infos, dict): # allow direct logging from env
self.direct_info = {}
for k, v in infos.items():
# only log scalars
if isinstance(v, float) or isinstance(v, int) or (isinstance(v, torch.Tensor) and len(v.shape) == 0):
self.direct_info[k] = v
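    # Hedged example of an infos dict this observer can digest; the keys are whatever the
    # task chooses to report, and the names/values below are illustrative only:
    #   infos = {
    #       "episode": {"profit": torch.tensor(0.12)},  # averaged and logged as Episode/profit
    #       "total_return": 0.03,                       # scalar, logged under <key>/frame|iter|time
    #   }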
def after_clear_stats(self):
self.mean_scores.clear()
def after_print_stats(self, frame, epoch_num, total_time):
if self.ep_infos:
for key in self.ep_infos[0]:
infotensor = torch.tensor([], device=self.algo.device)
for ep_info in self.ep_infos:
# handle scalar and zero dimensional tensor infos
if not isinstance(ep_info[key], torch.Tensor):
ep_info[key] = torch.Tensor([ep_info[key]])
if len(ep_info[key].shape) == 0:
ep_info[key] = ep_info[key].unsqueeze(0)
infotensor = torch.cat((infotensor, ep_info[key].to(self.algo.device)))
value = torch.mean(infotensor)
self.writer.add_scalar('Episode/' + key, value, epoch_num)
self.ep_infos.clear()
for k, v in self.direct_info.items():
self.writer.add_scalar(f'{k}/frame', v, frame)
self.writer.add_scalar(f'{k}/iter', v, epoch_num)
self.writer.add_scalar(f'{k}/time', v, total_time)
if self.mean_scores.current_size > 0:
mean_scores = self.mean_scores.get_mean()
self.writer.add_scalar('scores/mean', mean_scores, frame)
self.writer.add_scalar('scores/iter', mean_scores, epoch_num)
self.writer.add_scalar('scores/time', mean_scores, total_time)
class RLGPUEnv(vecenv.IVecEnv):
def __init__(self, config_name, num_actors, **kwargs):
self.env = env_configurations.configurations[config_name]['env_creator'](**kwargs)
def step(self, action):
return self.env.step(action)
def reset(self):
return self.env.reset()
def get_number_of_agents(self):
return self.env.get_number_of_agents()
def get_env_info(self):
info = {}
info['action_space'] = self.env.action_space
info['observation_space'] = self.env.observation_space
if self.env.num_states > 0:
info['state_space'] = self.env.state_space
print(info['action_space'], info['observation_space'], info['state_space'])
else:
print(info['action_space'], info['observation_space'])
return info
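# Hedged registration sketch (normally done by the training script rather than in this
# module); "RLGPU"/"rlgpu" follow the usual rl_games naming and `env` stands for the
# already-constructed vectorized task:
#   vecenv.register(
#       "RLGPU", lambda config_name, num_actors, **kwargs: RLGPUEnv(config_name, num_actors, **kwargs)
#   )
#   env_configurations.register("rlgpu", {"vecenv_type": "RLGPU", "env_creator": lambda **kwargs: env})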
| 5,149 | Python | 42.277311 | 121 | 0.642649 |
BeanSamuel/Exchange-Rate-Prediction-RL/utils/dash/live_plot.py | import os
import socket
import logging
import threading
import numpy as np
import time
import torch
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import plotly.io as pio
from dash import Dash, html, dcc, Output, Input
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
pio.renderers.default = "browser"
callback_dict = dict()
def callback(*args, **kwargs):
def wrapped(func):
global callback_dict
callback_dict[func.__name__] = (args, kwargs)
return func
return wrapped
class LivePlot:
def __init__(self, name, titles, steps):
self.name = name
self.titles = titles
self.dim_names = list(titles.keys())
self.dim_labels = list(titles.values())
self.num_dims = len(self.dim_names)
for i, labels in enumerate(self.dim_labels):
if isinstance(labels, list):
self.dim_labels[i] = ['All'] + labels
if isinstance(labels, int):
self.dim_labels[i] = ['All'] + list(map(str, range(labels)))
self.steps = 0
self.size = steps
self.time_axis = np.arange(steps)
self.free_dim = -1
self.datas = np.full([steps] + [len(x) - 1 for x in self.dim_labels], np.nan)
self._build_app()
self._create_thread()
def _build_app(self):
dropdowns = []
for name, labels in zip(self.dim_names, self.dim_labels):
dropdowns.append(name)
options = {str(i): label for i, label in enumerate(labels)}
dropdowns.append(dcc.Dropdown(id=name, options=options, value='0'))
app = Dash(__name__)
app.layout = html.Div([
html.H1(children=self.name, style={'textAlign': 'center'}),
html.Div(dropdowns),
html.Div([
dcc.Graph(id='live-graph'),
dcc.Interval(
id='interval-component',
interval=16,
n_intervals=0
)
])
])
for func_name, (args, kwargs) in callback_dict.items():
func = getattr(self, func_name)
app.callback(*args, **kwargs)(func)
app.callback(
[Output(i, 'value') for i in self.dim_names],
[Input(i, 'value') for i in self.dim_names]
)(self._update_figure)
self._update_figure(*(['1'] * self.num_dims))
self._app = app
def _create_thread(self):
port = 8050
while True:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
if s.connect_ex(('localhost', port)):
break
else:
port += 1
run_server = lambda: self._app.run(host='0.0.0.0', port=port)
thread = threading.Thread(target=run_server)
thread.daemon = True
thread.start()
time.sleep(0.1)
print('live plot:', self.name, f'http://localhost:{port}')
self._thread = thread
def _update_figure(self, *values, save_path=None):
values = [str(v) for v in values]
idx = [slice(None)]
titles = [' ']
# print('free dim', self.free_dim)
free_dim = -1
for i, v in enumerate(values):
if v == '0':
if free_dim == -1:
free_dim = i
else:
values[i] = '1'
if free_dim != self.free_dim and self.free_dim != -1:
values[self.free_dim] = '1'
self.free_dim = free_dim
for i in range(self.num_dims):
if values[i] == '0':
titles = self.dim_labels[i][1:]
idx.append(slice(None))
else:
idx.append(int(values[i]) - 1)
self.idx = tuple(idx)
# print(self.idx)
# print(titles)
self._updating = True
self.fig = go.FigureWidget(make_subplots(rows=len(titles), cols=1, subplot_titles=titles))
for i, data in enumerate(self._get_plot_data()):
self.fig.add_trace(go.Scatter(name='', x=self.time_axis, y=data), row=i+1, col=1)
self.fig.update_layout(height=200*len(titles)+100, template='plotly')
self._updating = False
if save_path is not None:
self.fig.write_html(save_path)
# print(values)
return values
def _get_plot_data(self):
datas = self.datas[self.idx]
return np.expand_dims(datas, 0) if datas.ndim == 1 else np.swapaxes(datas, 0, 1)
def update(self, datas):
if isinstance(datas, torch.Tensor):
datas = datas.detach().cpu().numpy()
if self.steps >= self.size:
self.time_axis += 1
self.datas[:-1] = self.datas[1:]
self.datas[-1] = datas
else:
self.datas[self.steps] = datas
self.steps += 1
while self._updating:
time.sleep(0.01)
for i, data in enumerate(self._get_plot_data()):
self.fig.data[i]['x'] = self.time_axis
self.fig.data[i]['y'] = data
@callback(
Output('live-graph', 'figure'),
Input('interval-component', 'n_intervals')
)
def _update_graph(self, n):
return self.fig
def select_labels(self, *labels):
# ToDo update selector label
self._update_figure(*labels)
def snapshot(self, dir_path, free_dim=0):
def export(labels, names):
dim = len(labels)
if dim == self.num_dims:
name = self.name + ': ' + ' '.join(names) if names else self.name
save_path = os.path.join(dir_path, name) + '.html'
self._update_figure(*labels, save_path=save_path)
else:
if dim == free_dim:
export(labels + [0], names)
else:
for i, s in enumerate(self.dim_labels[dim][1:]):
export(labels + [i+1], names + [s])
export([], [])
def save(self, dir_path):
state = self.__dict__.copy()
state.pop('_app')
state.pop('_thread')
torch.save(state, os.path.join(dir_path, self.name + '.liveplot'))
@staticmethod
def load(path):
plot = LivePlot.__new__(LivePlot)
plot.__dict__ = torch.load(path)
plot._build_app()
plot._create_thread()
return plot
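    # Hedged persistence sketch (paths are illustrative):
    #   plot.snapshot("runs/exp1/plots")                   # one HTML file per label combination
    #   plot.save("runs/exp1")                             # writes runs/exp1/<name>.liveplot
    #   plot = LivePlot.load("runs/exp1/reward.liveplot")  # rebuilds the Dash app and server thread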
if __name__ == '__main__':
plot = LivePlot('1', {'1': ['a', 'b'], '2': 5}, 30)
plot2 = LivePlot('2', {'1': ['a', 'b'], '2': 5}, 30)
import time
for i in range(10):
plot.update(np.random.random([2, 5]))
plot2.update(np.random.random([2, 5]))
time.sleep(0.1)
| 6,737 | Python | 29.488688 | 98 | 0.523081 |
BeanSamuel/Exchange-Rate-Prediction-RL/utils/config_utils/sim_config.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from utils.config_utils.default_scene_params import *
import copy
import omni.usd
import numpy as np
import torch
import carb
class SimConfig():
def __init__(self, config: dict = None):
self._config = config
self._cfg = config.get("task", dict())
self._parse_config()
if self._config["test"] == True:
self._sim_params["enable_scene_query_support"] = True
from omni.isaac.core.utils.extensions import enable_extension
if self._config["headless"] == True and not self._sim_params["enable_cameras"] and not self._config["enable_livestream"]:
self._sim_params["use_flatcache"] = False
self._sim_params["enable_viewport"] = False
else:
self._sim_params["enable_viewport"] = True
enable_extension("omni.kit.viewport.bundle")
enable_extension("omni.replicator.isaac")
if self._sim_params["disable_contact_processing"]:
carb.settings.get_settings().set_bool("/physics/disableContactProcessing", True)
carb.settings.get_settings().set_bool("/physics/physxDispatcher", True)
def _parse_config(self):
        # general sim parameters
self._sim_params = copy.deepcopy(default_sim_params)
self._default_physics_material = copy.deepcopy(default_physics_material)
sim_cfg = self._cfg.get("sim", None)
if sim_cfg is not None:
for opt in sim_cfg.keys():
if opt in self._sim_params:
if opt == "default_physics_material":
for material_opt in sim_cfg[opt]:
self._default_physics_material[material_opt] = sim_cfg[opt][material_opt]
else:
self._sim_params[opt] = sim_cfg[opt]
else:
print("Sim params does not have attribute: ", opt)
self._sim_params["default_physics_material"] = self._default_physics_material
# physx parameters
self._physx_params = copy.deepcopy(default_physx_params)
if sim_cfg is not None and "physx" in sim_cfg:
for opt in sim_cfg["physx"].keys():
if opt in self._physx_params:
self._physx_params[opt] = sim_cfg["physx"][opt]
else:
print("Physx sim params does not have attribute: ", opt)
self._sanitize_device()
def _sanitize_device(self):
if self._sim_params["use_gpu_pipeline"]:
self._physx_params["use_gpu"] = True
# device should be in sync with pipeline
if self._sim_params["use_gpu_pipeline"]:
self._config["sim_device"] = f"cuda:{self._config['device_id']}"
else:
self._config["sim_device"] = "cpu"
# also write to physics params for setting sim device
self._physx_params["sim_device"] = self._config["sim_device"]
print("Pipeline: ", "GPU" if self._sim_params["use_gpu_pipeline"] else "CPU")
print("Pipeline Device: ", self._config["sim_device"])
print("Sim Device: ", "GPU" if self._physx_params["use_gpu"] else "CPU")
def parse_actor_config(self, actor_name):
actor_params = copy.deepcopy(default_actor_options)
if "sim" in self._cfg and actor_name in self._cfg["sim"]:
actor_cfg = self._cfg["sim"][actor_name]
for opt in actor_cfg.keys():
if actor_cfg[opt] != -1 and opt in actor_params:
actor_params[opt] = actor_cfg[opt]
elif opt not in actor_params:
print("Actor params does not have attribute: ", opt)
return actor_params
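    # Hedged sketch of the per-actor block this method reads from the task YAML; the
    # actor name and values are illustrative, and -1 means "keep the default/authored value":
    #
    #   sim:
    #     my_robot:
    #       override_usd_defaults: False
    #       make_kinematic: -1
    #       solver_position_iteration_count: 8
    #       solver_velocity_iteration_count: 1
    #       max_depenetration_velocity: 100.0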
def _get_actor_config_value(self, actor_name, attribute_name, attribute=None):
actor_params = self.parse_actor_config(actor_name)
if attribute is not None:
if attribute_name not in actor_params:
return attribute.Get()
if actor_params[attribute_name] != -1:
return actor_params[attribute_name]
elif actor_params["override_usd_defaults"] and not attribute.IsAuthored():
return self._physx_params[attribute_name]
else:
if actor_params[attribute_name] != -1:
return actor_params[attribute_name]
@property
def sim_params(self):
return self._sim_params
@property
def config(self):
return self._config
@property
def task_config(self):
return self._cfg
@property
def physx_params(self):
return self._physx_params
def get_physics_params(self):
return {**self.sim_params, **self.physx_params}
def _get_physx_collision_api(self, prim):
from pxr import UsdPhysics, PhysxSchema
physx_collision_api = PhysxSchema.PhysxCollisionAPI(prim)
if not physx_collision_api:
physx_collision_api = PhysxSchema.PhysxCollisionAPI.Apply(prim)
return physx_collision_api
def _get_physx_rigid_body_api(self, prim):
from pxr import UsdPhysics, PhysxSchema
physx_rb_api = PhysxSchema.PhysxRigidBodyAPI(prim)
if not physx_rb_api:
physx_rb_api = PhysxSchema.PhysxRigidBodyAPI.Apply(prim)
return physx_rb_api
def _get_physx_articulation_api(self, prim):
from pxr import UsdPhysics, PhysxSchema
arti_api = PhysxSchema.PhysxArticulationAPI(prim)
if not arti_api:
arti_api = PhysxSchema.PhysxArticulationAPI.Apply(prim)
return arti_api
def set_contact_offset(self, name, prim, value=None):
physx_collision_api = self._get_physx_collision_api(prim)
contact_offset = physx_collision_api.GetContactOffsetAttr()
# if not contact_offset:
# contact_offset = physx_collision_api.CreateContactOffsetAttr()
if value is None:
value = self._get_actor_config_value(name, "contact_offset", contact_offset)
if value != -1:
contact_offset.Set(value)
def set_rest_offset(self, name, prim, value=None):
physx_collision_api = self._get_physx_collision_api(prim)
rest_offset = physx_collision_api.GetRestOffsetAttr()
# if not rest_offset:
# rest_offset = physx_collision_api.CreateRestOffsetAttr()
if value is None:
value = self._get_actor_config_value(name, "rest_offset", rest_offset)
if value != -1:
rest_offset.Set(value)
def set_position_iteration(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
solver_position_iteration_count = physx_rb_api.GetSolverPositionIterationCountAttr()
if value is None:
value = self._get_actor_config_value(name, "solver_position_iteration_count", solver_position_iteration_count)
if value != -1:
solver_position_iteration_count.Set(value)
def set_velocity_iteration(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
solver_velocity_iteration_count = physx_rb_api.GetSolverVelocityIterationCountAttr()
if value is None:
value = self._get_actor_config_value(name, "solver_velocity_iteration_count", solver_position_iteration_count)
if value != -1:
solver_velocity_iteration_count.Set(value)
def set_max_depenetration_velocity(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
max_depenetration_velocity = physx_rb_api.GetMaxDepenetrationVelocityAttr()
if value is None:
value = self._get_actor_config_value(name, "max_depenetration_velocity", max_depenetration_velocity)
if value != -1:
max_depenetration_velocity.Set(value)
def set_sleep_threshold(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
sleep_threshold = physx_rb_api.GetSleepThresholdAttr()
if value is None:
value = self._get_actor_config_value(name, "sleep_threshold", sleep_threshold)
if value != -1:
sleep_threshold.Set(value)
def set_stabilization_threshold(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
stabilization_threshold = physx_rb_api.GetStabilizationThresholdAttr()
if value is None:
value = self._get_actor_config_value(name, "stabilization_threshold", stabilization_threshold)
if value != -1:
stabilization_threshold.Set(value)
def set_gyroscopic_forces(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
enable_gyroscopic_forces = physx_rb_api.GetEnableGyroscopicForcesAttr()
if value is None:
value = self._get_actor_config_value(name, "enable_gyroscopic_forces", enable_gyroscopic_forces)
if value != -1:
enable_gyroscopic_forces.Set(value)
def set_density(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
density = physx_rb_api.GetDensityAttr()
if value is None:
value = self._get_actor_config_value(name, "density", density)
if value != -1:
density.Set(value)
# auto-compute mass
            self.set_mass(name, prim, 0.0)
def set_mass(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
mass = physx_rb_api.GetMassAttr()
if value is None:
value = self._get_actor_config_value(name, "mass", mass)
if value != -1:
mass.Set(value)
def retain_acceleration(self, prim):
# retain accelerations if running with more than one substep
physx_rb_api = self._get_physx_rigid_body_api(prim)
if self._sim_params["substeps"] > 1:
physx_rb_api.GetRetainAccelerationsAttr().Set(True)
def make_kinematic(self, name, prim, cfg, value=None):
# make rigid body kinematic (fixed base and no collision)
from pxr import UsdPhysics, PhysxSchema
stage = omni.usd.get_context().get_stage()
if value is None:
value = self._get_actor_config_value(name, "make_kinematic")
if value:
# parse through all children prims
prims = [prim]
while len(prims) > 0:
cur_prim = prims.pop(0)
rb = UsdPhysics.RigidBodyAPI.Get(stage, cur_prim.GetPath())
if rb:
rb.CreateKinematicEnabledAttr().Set(True)
children_prims = cur_prim.GetPrim().GetChildren()
prims = prims + children_prims
def set_articulation_position_iteration(self, name, prim, value=None):
arti_api = self._get_physx_articulation_api(prim)
solver_position_iteration_count = arti_api.GetSolverPositionIterationCountAttr()
if value is None:
value = self._get_actor_config_value(name, "solver_position_iteration_count", solver_position_iteration_count)
if value != -1:
solver_position_iteration_count.Set(value)
def set_articulation_velocity_iteration(self, name, prim, value=None):
arti_api = self._get_physx_articulation_api(prim)
solver_velocity_iteration_count = arti_api.GetSolverVelocityIterationCountAttr()
if value is None:
value = self._get_actor_config_value(name, "solver_velocity_iteration_count", solver_position_iteration_count)
if value != -1:
solver_velocity_iteration_count.Set(value)
def set_articulation_sleep_threshold(self, name, prim, value=None):
arti_api = self._get_physx_articulation_api(prim)
sleep_threshold = arti_api.GetSleepThresholdAttr()
if value is None:
value = self._get_actor_config_value(name, "sleep_threshold", sleep_threshold)
if value != -1:
sleep_threshold.Set(value)
def set_articulation_stabilization_threshold(self, name, prim, value=None):
arti_api = self._get_physx_articulation_api(prim)
stabilization_threshold = arti_api.GetStabilizationThresholdAttr()
if value is None:
value = self._get_actor_config_value(name, "stabilization_threshold", stabilization_threshold)
if value != -1:
stabilization_threshold.Set(value)
def apply_rigid_body_settings(self, name, prim, cfg, is_articulation):
from pxr import UsdPhysics, PhysxSchema
stage = omni.usd.get_context().get_stage()
rb_api = UsdPhysics.RigidBodyAPI.Get(stage, prim.GetPath())
physx_rb_api = PhysxSchema.PhysxRigidBodyAPI.Get(stage, prim.GetPath())
if not physx_rb_api:
physx_rb_api = PhysxSchema.PhysxRigidBodyAPI.Apply(prim)
# if it's a body in an articulation, it's handled at articulation root
if not is_articulation:
self.make_kinematic(name, prim, cfg, cfg["make_kinematic"])
self.set_position_iteration(name, prim, cfg["solver_position_iteration_count"])
self.set_velocity_iteration(name, prim, cfg["solver_velocity_iteration_count"])
self.set_max_depenetration_velocity(name, prim, cfg["max_depenetration_velocity"])
self.set_sleep_threshold(name, prim, cfg["sleep_threshold"])
self.set_stabilization_threshold(name, prim, cfg["stabilization_threshold"])
self.set_gyroscopic_forces(name, prim, cfg["enable_gyroscopic_forces"])
# density and mass
mass_api = UsdPhysics.MassAPI.Get(stage, prim.GetPath())
if mass_api is None:
mass_api = UsdPhysics.MassAPI.Apply(prim)
mass_attr = mass_api.GetMassAttr()
density_attr = mass_api.GetDensityAttr()
if not mass_attr:
mass_attr = mass_api.CreateMassAttr()
if not density_attr:
density_attr = mass_api.CreateDensityAttr()
if cfg["density"] != -1:
density_attr.Set(cfg["density"])
mass_attr.Set(0.0) # mass is to be computed
elif cfg["override_usd_defaults"] and not density_attr.IsAuthored() and not mass_attr.IsAuthored():
density_attr.Set(self._physx_params["density"])
self.retain_acceleration(prim)
def apply_rigid_shape_settings(self, name, prim, cfg):
from pxr import UsdPhysics, PhysxSchema
stage = omni.usd.get_context().get_stage()
# collision APIs
collision_api = UsdPhysics.CollisionAPI(prim)
if not collision_api:
collision_api = UsdPhysics.CollisionAPI.Apply(prim)
physx_collision_api = PhysxSchema.PhysxCollisionAPI(prim)
if not physx_collision_api:
physx_collision_api = PhysxSchema.PhysxCollisionAPI.Apply(prim)
self.set_contact_offset(name, prim, cfg["contact_offset"])
self.set_rest_offset(name, prim, cfg["rest_offset"])
def apply_articulation_settings(self, name, prim, cfg):
from pxr import UsdPhysics, PhysxSchema
stage = omni.usd.get_context().get_stage()
is_articulation = False
# check if is articulation
prims = [prim]
while len(prims) > 0:
prim_tmp = prims.pop(0)
articulation_api = UsdPhysics.ArticulationRootAPI.Get(stage, prim_tmp.GetPath())
physx_articulation_api = PhysxSchema.PhysxArticulationAPI.Get(stage, prim_tmp.GetPath())
if articulation_api or physx_articulation_api:
is_articulation = True
children_prims = prim_tmp.GetPrim().GetChildren()
prims = prims + children_prims
# parse through all children prims
prims = [prim]
while len(prims) > 0:
cur_prim = prims.pop(0)
rb = UsdPhysics.RigidBodyAPI.Get(stage, cur_prim.GetPath())
collision_body = UsdPhysics.CollisionAPI.Get(stage, cur_prim.GetPath())
articulation = UsdPhysics.ArticulationRootAPI.Get(stage, cur_prim.GetPath())
if rb:
self.apply_rigid_body_settings(name, cur_prim, cfg, is_articulation)
if collision_body:
self.apply_rigid_shape_settings(name, cur_prim, cfg)
if articulation:
articulation_api = UsdPhysics.ArticulationRootAPI.Get(stage, cur_prim.GetPath())
physx_articulation_api = PhysxSchema.PhysxArticulationAPI.Get(stage, cur_prim.GetPath())
# enable self collisions
enable_self_collisions = physx_articulation_api.GetEnabledSelfCollisionsAttr()
if cfg["enable_self_collisions"] != -1:
enable_self_collisions.Set(cfg["enable_self_collisions"])
self.set_articulation_position_iteration(name, cur_prim, cfg["solver_position_iteration_count"])
self.set_articulation_velocity_iteration(name, cur_prim, cfg["solver_velocity_iteration_count"])
self.set_articulation_sleep_threshold(name, cur_prim, cfg["sleep_threshold"])
self.set_articulation_stabilization_threshold(name, cur_prim, cfg["stabilization_threshold"])
children_prims = cur_prim.GetPrim().GetChildren()
prims = prims + children_prims
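    # Hedged end-to-end sketch (the config dict and the robot prim come from the task
    # set-up code; the names used here are illustrative):
    #   sim_config = SimConfig(cfg_dict)
    #   physics_params = sim_config.get_physics_params()
    #   sim_config.apply_articulation_settings(
    #       "my_robot", robot_prim, sim_config.parse_actor_config("my_robot")
    #   )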
| 18,833 | Python | 44.383132 | 129 | 0.638188 |
BeanSamuel/Exchange-Rate-Prediction-RL/utils/config_utils/default_scene_params.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
default_physx_params = {
### Per-scene settings
"use_gpu": False,
"worker_thread_count": 4,
"solver_type": 1, # 0: PGS, 1:TGS
"bounce_threshold_velocity": 0.2,
"friction_offset_threshold": 0.04, # A threshold of contact separation distance used to decide if a contact
# point will experience friction forces.
"friction_correlation_distance": 0.025, # Contact points can be merged into a single friction anchor if the
# distance between the contacts is smaller than correlation distance.
# disabling these can be useful for debugging
"enable_sleeping": True,
"enable_stabilization": True,
# GPU buffers
"gpu_max_rigid_contact_count": 512 * 1024,
"gpu_max_rigid_patch_count": 80 * 1024,
"gpu_found_lost_pairs_capacity": 1024,
"gpu_found_lost_aggregate_pairs_capacity": 1024,
"gpu_total_aggregate_pairs_capacity": 1024,
"gpu_max_soft_body_contacts": 1024 * 1024,
"gpu_max_particle_contacts": 1024 * 1024,
"gpu_heap_capacity": 64 * 1024 * 1024,
"gpu_temp_buffer_capacity": 16 * 1024 * 1024,
"gpu_max_num_partitions": 8,
### Per-actor settings ( can override in actor_options )
"solver_position_iteration_count": 4,
"solver_velocity_iteration_count": 1,
"sleep_threshold": 0.0, # Mass-normalized kinetic energy threshold below which an actor may go to sleep.
# Allowed range [0, max_float).
"stabilization_threshold": 0.0, # Mass-normalized kinetic energy threshold below which an actor may
# participate in stabilization. Allowed range [0, max_float).
### Per-body settings ( can override in actor_options )
"enable_gyroscopic_forces": False,
"density": 1000.0, # density to be used for bodies that do not specify mass or density
"max_depenetration_velocity": 100.0,
### Per-shape settings ( can override in actor_options )
"contact_offset": 0.02,
"rest_offset": 0.001
}
default_physics_material = {
"static_friction": 1.0,
"dynamic_friction": 1.0,
"restitution": 0.0
}
default_sim_params = {
"gravity": [0.0, 0.0, -9.81],
"dt": 1.0 / 60.0,
"substeps": 1,
"use_gpu_pipeline": True,
"add_ground_plane": True,
"add_distant_light": True,
"use_flatcache": True,
"enable_scene_query_support": False,
"enable_cameras": False,
"disable_contact_processing": False,
"default_physics_material": default_physics_material
}
default_actor_options = {
# -1 means use authored value from USD or default values from default_sim_params if not explicitly authored in USD.
# If an attribute value is not explicitly authored in USD, add one with the value given here,
# which overrides the USD default.
"override_usd_defaults": False,
"make_kinematic": -1,
"enable_self_collisions": -1,
"enable_gyroscopic_forces": -1,
"solver_position_iteration_count": -1,
"solver_velocity_iteration_count": -1,
"sleep_threshold": -1,
"stabilization_threshold": -1,
"max_depenetration_velocity": -1,
"density": -1,
"mass": -1,
"contact_offset": -1,
"rest_offset": -1
}
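# Hedged illustration of how these defaults are combined downstream (the override value
# is made up):
#   actor_params = copy.deepcopy(default_actor_options)
#   actor_params.update({"solver_position_iteration_count": 8})  # override from the task YAML
#   # entries left at -1 later fall back to the authored USD value or to default_physx_params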
| 4,803 | Python | 41.140351 | 119 | 0.684364 |
BeanSamuel/Exchange-Rate-Prediction-RL/utils/config_utils/path_utils.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import carb
from hydra.utils import to_absolute_path
import os
def is_valid_local_file(path):
return os.path.isfile(path)
def is_valid_ov_file(path):
import omni.client
result, entry = omni.client.stat(path)
return result == omni.client.Result.OK
def download_ov_file(source_path, target_path):
import omni.client
result = omni.client.copy(source_path, target_path)
if result == omni.client.Result.OK:
return True
return False
def break_ov_path(path):
import omni.client
return omni.client.break_url(path)
def retrieve_checkpoint_path(path):
# check if it's a local path
if is_valid_local_file(path):
return to_absolute_path(path)
# check if it's an OV path
elif is_valid_ov_file(path):
ov_path = break_ov_path(path)
file_name = os.path.basename(ov_path.path)
target_path = f"checkpoints/{file_name}"
copy_to_local = download_ov_file(path, target_path)
return to_absolute_path(target_path)
else:
carb.log_error(f"Invalid checkpoint path: {path}")
return None | 2,656 | Python | 38.656716 | 80 | 0.735693 |
BeanSamuel/Exchange-Rate-Prediction-RL/utils/hydra_cfg/hydra_utils.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import hydra
from omegaconf import DictConfig, OmegaConf
## OmegaConf & Hydra Config
# Resolvers used in hydra configs (see https://omegaconf.readthedocs.io/en/2.1_branch/usage.html#resolvers)
OmegaConf.register_new_resolver('eq', lambda x, y: x.lower()==y.lower())
OmegaConf.register_new_resolver('contains', lambda x, y: x.lower() in y.lower())
OmegaConf.register_new_resolver('if', lambda pred, a, b: a if pred else b)
# allows us to resolve default arguments which are copied in multiple places in the config. used primarily for
# num_envs
OmegaConf.register_new_resolver('resolve_default', lambda default, arg: default if arg=='' else arg)
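# Hedged examples of how these resolvers appear inside the hydra YAML configs
# (the surrounding keys are illustrative, not copied from a shipped config):
#   test: ${eq:${task_name},"ExchangeRate"}             # case-insensitive equality
#   pipeline: ${if:${..use_gpu_pipeline},"gpu","cpu"}   # inline ternary
#   num_envs: ${resolve_default:4096,${...num_envs}}    # fall back to 4096 when '' is passed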
| 2,207 | Python | 51.571427 | 110 | 0.775714 |
BeanSamuel/Exchange-Rate-Prediction-RL/utils/hydra_cfg/reformat.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from omegaconf import DictConfig, ListConfig, OmegaConf
from typing import Dict
def omegaconf_to_dict(d: DictConfig)->Dict:
"""Converts an omegaconf DictConfig to a python Dict, respecting variable interpolation."""
ret = {}
for k, v in d.items():
if isinstance(v, DictConfig):
ret[k] = omegaconf_to_dict(v)
elif isinstance(v, ListConfig):
ret[k] = list(v)
else:
ret[k] = v
return ret
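# Hedged usage sketch (the config contents are illustrative); interpolations such as
# "${train.lr}" are resolved while the DictConfig is converted:
#   cfg = OmegaConf.create({"train": {"lr": 3e-4}, "lr_alias": "${train.lr}"})
#   omegaconf_to_dict(cfg)  # -> {"train": {"lr": 0.0003}, "lr_alias": 0.0003}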
def print_dict(val, nesting: int = -4, start: bool = True):
"""Outputs a nested dictionory."""
if type(val) == dict:
if not start:
print('')
nesting += 4
for k in val:
print(nesting * ' ', end='')
print(k, end=': ')
print_dict(val[k], nesting, start=False)
else:
print(val) | 2,390 | Python | 40.224137 | 95 | 0.703347 |
BeanSamuel/Exchange-Rate-Prediction-RL/utils/terrain_utils/terrain_utils.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from numpy.random import choice
from scipy import interpolate
from math import sqrt
from omni.isaac.core.prims import XFormPrim
from pxr import UsdPhysics, Sdf, Gf, PhysxSchema
def random_uniform_terrain(terrain, min_height, max_height, step=1, downsampled_scale=None,):
"""
Generate a uniform noise terrain
    Parameters:
terrain (SubTerrain): the terrain
min_height (float): the minimum height of the terrain [meters]
max_height (float): the maximum height of the terrain [meters]
step (float): minimum height change between two points [meters]
        downsampled_scale (float): distance between two randomly sampled points (must be larger than or equal to terrain.horizontal_scale)
"""
if downsampled_scale is None:
downsampled_scale = terrain.horizontal_scale
# switch parameters to discrete units
min_height = int(min_height / terrain.vertical_scale)
max_height = int(max_height / terrain.vertical_scale)
step = int(step / terrain.vertical_scale)
heights_range = np.arange(min_height, max_height + step, step)
height_field_downsampled = np.random.choice(heights_range, (int(terrain.width * terrain.horizontal_scale / downsampled_scale), int(
terrain.length * terrain.horizontal_scale / downsampled_scale)))
x = np.linspace(0, terrain.width * terrain.horizontal_scale, height_field_downsampled.shape[0])
y = np.linspace(0, terrain.length * terrain.horizontal_scale, height_field_downsampled.shape[1])
f = interpolate.interp2d(y, x, height_field_downsampled, kind='linear')
x_upsampled = np.linspace(0, terrain.width * terrain.horizontal_scale, terrain.width)
y_upsampled = np.linspace(0, terrain.length * terrain.horizontal_scale, terrain.length)
z_upsampled = np.rint(f(y_upsampled, x_upsampled))
terrain.height_field_raw += z_upsampled.astype(np.int16)
return terrain
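# Minimal usage sketch (hypothetical values, mirroring create_terrain_demo.py):
#   terrain = SubTerrain(width=128, length=128, vertical_scale=0.005, horizontal_scale=0.25)
#   terrain = random_uniform_terrain(terrain, min_height=-0.2, max_height=0.2,
#                                    step=0.2, downsampled_scale=0.5)
#   # terrain.height_field_raw now holds the noisy heights in discrete vertical units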
def sloped_terrain(terrain, slope=1):
"""
Generate a sloped terrain
Parameters:
terrain (SubTerrain): the terrain
slope (int): positive or negative slope
Returns:
        terrain (SubTerrain): updated terrain
"""
x = np.arange(0, terrain.width)
y = np.arange(0, terrain.length)
xx, yy = np.meshgrid(x, y, sparse=True)
xx = xx.reshape(terrain.width, 1)
max_height = int(slope * (terrain.horizontal_scale / terrain.vertical_scale) * terrain.width)
terrain.height_field_raw[:, np.arange(terrain.length)] += (max_height * xx / terrain.width).astype(terrain.height_field_raw.dtype)
return terrain
def pyramid_sloped_terrain(terrain, slope=1, platform_size=1.):
"""
Generate a sloped terrain
Parameters:
terrain (terrain): the terrain
slope (int): positive or negative slope
platform_size (float): size of the flat platform at the center of the terrain [meters]
Returns:
        terrain (SubTerrain): updated terrain
"""
x = np.arange(0, terrain.width)
y = np.arange(0, terrain.length)
center_x = int(terrain.width / 2)
center_y = int(terrain.length / 2)
xx, yy = np.meshgrid(x, y, sparse=True)
xx = (center_x - np.abs(center_x-xx)) / center_x
yy = (center_y - np.abs(center_y-yy)) / center_y
xx = xx.reshape(terrain.width, 1)
yy = yy.reshape(1, terrain.length)
max_height = int(slope * (terrain.horizontal_scale / terrain.vertical_scale) * (terrain.width / 2))
terrain.height_field_raw += (max_height * xx * yy).astype(terrain.height_field_raw.dtype)
platform_size = int(platform_size / terrain.horizontal_scale / 2)
x1 = terrain.width // 2 - platform_size
x2 = terrain.width // 2 + platform_size
y1 = terrain.length // 2 - platform_size
y2 = terrain.length // 2 + platform_size
min_h = min(terrain.height_field_raw[x1, y1], 0)
max_h = max(terrain.height_field_raw[x1, y1], 0)
terrain.height_field_raw = np.clip(terrain.height_field_raw, min_h, max_h)
return terrain
def discrete_obstacles_terrain(terrain, max_height, min_size, max_size, num_rects, platform_size=1.):
"""
    Generate a terrain with randomly placed, rectangular obstacles
Parameters:
terrain (terrain): the terrain
max_height (float): maximum height of the obstacles (range=[-max, -max/2, max/2, max]) [meters]
min_size (float): minimum size of a rectangle obstacle [meters]
max_size (float): maximum size of a rectangle obstacle [meters]
num_rects (int): number of randomly generated obstacles
platform_size (float): size of the flat platform at the center of the terrain [meters]
Returns:
        terrain (SubTerrain): updated terrain
"""
# switch parameters to discrete units
max_height = int(max_height / terrain.vertical_scale)
min_size = int(min_size / terrain.horizontal_scale)
max_size = int(max_size / terrain.horizontal_scale)
platform_size = int(platform_size / terrain.horizontal_scale)
(i, j) = terrain.height_field_raw.shape
height_range = [-max_height, -max_height // 2, max_height // 2, max_height]
width_range = range(min_size, max_size, 4)
length_range = range(min_size, max_size, 4)
for _ in range(num_rects):
width = np.random.choice(width_range)
length = np.random.choice(length_range)
start_i = np.random.choice(range(0, i-width, 4))
start_j = np.random.choice(range(0, j-length, 4))
terrain.height_field_raw[start_i:start_i+width, start_j:start_j+length] = np.random.choice(height_range)
x1 = (terrain.width - platform_size) // 2
x2 = (terrain.width + platform_size) // 2
y1 = (terrain.length - platform_size) // 2
y2 = (terrain.length + platform_size) // 2
terrain.height_field_raw[x1:x2, y1:y2] = 0
return terrain
def wave_terrain(terrain, num_waves=1, amplitude=1.):
"""
Generate a wavy terrain
Parameters:
terrain (terrain): the terrain
num_waves (int): number of sine waves across the terrain length
Returns:
        terrain (SubTerrain): updated terrain
"""
amplitude = int(0.5*amplitude / terrain.vertical_scale)
if num_waves > 0:
div = terrain.length / (num_waves * np.pi * 2)
x = np.arange(0, terrain.width)
y = np.arange(0, terrain.length)
xx, yy = np.meshgrid(x, y, sparse=True)
xx = xx.reshape(terrain.width, 1)
yy = yy.reshape(1, terrain.length)
terrain.height_field_raw += (amplitude*np.cos(yy / div) + amplitude*np.sin(xx / div)).astype(
terrain.height_field_raw.dtype)
return terrain
def stairs_terrain(terrain, step_width, step_height):
"""
    Generate a stairs terrain
Parameters:
terrain (terrain): the terrain
step_width (float): the width of the step [meters]
step_height (float): the height of the step [meters]
Returns:
        terrain (SubTerrain): updated terrain
"""
# switch parameters to discrete units
step_width = int(step_width / terrain.horizontal_scale)
step_height = int(step_height / terrain.vertical_scale)
num_steps = terrain.width // step_width
height = step_height
for i in range(num_steps):
terrain.height_field_raw[i * step_width: (i + 1) * step_width, :] += height
height += step_height
return terrain
def pyramid_stairs_terrain(terrain, step_width, step_height, platform_size=1.):
"""
Generate stairs
Parameters:
terrain (terrain): the terrain
step_width (float): the width of the step [meters]
        step_height (float): the height of the step [meters]
platform_size (float): size of the flat platform at the center of the terrain [meters]
Returns:
        terrain (SubTerrain): updated terrain
"""
# switch parameters to discrete units
step_width = int(step_width / terrain.horizontal_scale)
step_height = int(step_height / terrain.vertical_scale)
platform_size = int(platform_size / terrain.horizontal_scale)
height = 0
start_x = 0
stop_x = terrain.width
start_y = 0
stop_y = terrain.length
while (stop_x - start_x) > platform_size and (stop_y - start_y) > platform_size:
start_x += step_width
stop_x -= step_width
start_y += step_width
stop_y -= step_width
height += step_height
terrain.height_field_raw[start_x: stop_x, start_y: stop_y] = height
return terrain
def stepping_stones_terrain(terrain, stone_size, stone_distance, max_height, platform_size=1., depth=-10):
"""
Generate a stepping stones terrain
Parameters:
terrain (terrain): the terrain
stone_size (float): horizontal size of the stepping stones [meters]
        stone_distance (float): distance between stones (i.e. size of the holes) [meters]
max_height (float): maximum height of the stones (positive and negative) [meters]
platform_size (float): size of the flat platform at the center of the terrain [meters]
depth (float): depth of the holes (default=-10.) [meters]
Returns:
        terrain (SubTerrain): updated terrain
"""
# switch parameters to discrete units
stone_size = int(stone_size / terrain.horizontal_scale)
stone_distance = int(stone_distance / terrain.horizontal_scale)
max_height = int(max_height / terrain.vertical_scale)
platform_size = int(platform_size / terrain.horizontal_scale)
height_range = np.arange(-max_height-1, max_height, step=1)
start_x = 0
start_y = 0
terrain.height_field_raw[:, :] = int(depth / terrain.vertical_scale)
if terrain.length >= terrain.width:
while start_y < terrain.length:
stop_y = min(terrain.length, start_y + stone_size)
start_x = np.random.randint(0, stone_size)
# fill first hole
stop_x = max(0, start_x - stone_distance)
terrain.height_field_raw[0: stop_x, start_y: stop_y] = np.random.choice(height_range)
# fill row
while start_x < terrain.width:
stop_x = min(terrain.width, start_x + stone_size)
terrain.height_field_raw[start_x: stop_x, start_y: stop_y] = np.random.choice(height_range)
start_x += stone_size + stone_distance
start_y += stone_size + stone_distance
elif terrain.width > terrain.length:
while start_x < terrain.width:
stop_x = min(terrain.width, start_x + stone_size)
start_y = np.random.randint(0, stone_size)
# fill first hole
stop_y = max(0, start_y - stone_distance)
terrain.height_field_raw[start_x: stop_x, 0: stop_y] = np.random.choice(height_range)
# fill column
while start_y < terrain.length:
stop_y = min(terrain.length, start_y + stone_size)
terrain.height_field_raw[start_x: stop_x, start_y: stop_y] = np.random.choice(height_range)
start_y += stone_size + stone_distance
start_x += stone_size + stone_distance
x1 = (terrain.width - platform_size) // 2
x2 = (terrain.width + platform_size) // 2
y1 = (terrain.length - platform_size) // 2
y2 = (terrain.length + platform_size) // 2
terrain.height_field_raw[x1:x2, y1:y2] = 0
return terrain
def convert_heightfield_to_trimesh(height_field_raw, horizontal_scale, vertical_scale, slope_threshold=None):
"""
Convert a heightfield array to a triangle mesh represented by vertices and triangles.
    Optionally, corrects vertical surfaces above the provided slope threshold:
If (y2-y1)/(x2-x1) > slope_threshold -> Move A to A' (set x1 = x2). Do this for all directions.
B(x2,y2)
/|
/ |
/ |
(x1,y1)A---A'(x2',y1)
Parameters:
height_field_raw (np.array): input heightfield
horizontal_scale (float): horizontal scale of the heightfield [meters]
vertical_scale (float): vertical scale of the heightfield [meters]
slope_threshold (float): the slope threshold above which surfaces are made vertical. If None no correction is applied (default: None)
Returns:
vertices (np.array(float)): array of shape (num_vertices, 3). Each row represents the location of each vertex [meters]
triangles (np.array(int)): array of shape (num_triangles, 3). Each row represents the indices of the 3 vertices connected by this triangle.
"""
hf = height_field_raw
num_rows = hf.shape[0]
num_cols = hf.shape[1]
y = np.linspace(0, (num_cols-1)*horizontal_scale, num_cols)
x = np.linspace(0, (num_rows-1)*horizontal_scale, num_rows)
yy, xx = np.meshgrid(y, x)
if slope_threshold is not None:
slope_threshold *= horizontal_scale / vertical_scale
move_x = np.zeros((num_rows, num_cols))
move_y = np.zeros((num_rows, num_cols))
move_corners = np.zeros((num_rows, num_cols))
move_x[:num_rows-1, :] += (hf[1:num_rows, :] - hf[:num_rows-1, :] > slope_threshold)
move_x[1:num_rows, :] -= (hf[:num_rows-1, :] - hf[1:num_rows, :] > slope_threshold)
move_y[:, :num_cols-1] += (hf[:, 1:num_cols] - hf[:, :num_cols-1] > slope_threshold)
move_y[:, 1:num_cols] -= (hf[:, :num_cols-1] - hf[:, 1:num_cols] > slope_threshold)
move_corners[:num_rows-1, :num_cols-1] += (hf[1:num_rows, 1:num_cols] - hf[:num_rows-1, :num_cols-1] > slope_threshold)
move_corners[1:num_rows, 1:num_cols] -= (hf[:num_rows-1, :num_cols-1] - hf[1:num_rows, 1:num_cols] > slope_threshold)
xx += (move_x + move_corners*(move_x == 0)) * horizontal_scale
yy += (move_y + move_corners*(move_y == 0)) * horizontal_scale
# create triangle mesh vertices and triangles from the heightfield grid
vertices = np.zeros((num_rows*num_cols, 3), dtype=np.float32)
vertices[:, 0] = xx.flatten()
vertices[:, 1] = yy.flatten()
vertices[:, 2] = hf.flatten() * vertical_scale
triangles = -np.ones((2*(num_rows-1)*(num_cols-1), 3), dtype=np.uint32)
for i in range(num_rows - 1):
ind0 = np.arange(0, num_cols-1) + i*num_cols
ind1 = ind0 + 1
ind2 = ind0 + num_cols
ind3 = ind2 + 1
start = 2*i*(num_cols-1)
stop = start + 2*(num_cols-1)
triangles[start:stop:2, 0] = ind0
triangles[start:stop:2, 1] = ind3
triangles[start:stop:2, 2] = ind1
triangles[start+1:stop:2, 0] = ind0
triangles[start+1:stop:2, 1] = ind2
triangles[start+1:stop:2, 2] = ind3
return vertices, triangles
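# Sketch of the heightfield -> triangle mesh -> USD stage pipeline (hypothetical
# scales, following create_terrain_demo.py):
#   vertices, triangles = convert_heightfield_to_trimesh(
#       heightfield, horizontal_scale=0.25, vertical_scale=0.005, slope_threshold=1.5)
#   add_terrain_to_stage(stage, vertices, triangles, position=np.array([0.0, 0.0, 0.0]))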
def add_terrain_to_stage(stage, vertices, triangles, position=None, orientation=None):
num_faces = triangles.shape[0]
terrain_mesh = stage.DefinePrim("/World/terrain", "Mesh")
terrain_mesh.GetAttribute("points").Set(vertices)
terrain_mesh.GetAttribute("faceVertexIndices").Set(triangles.flatten())
terrain_mesh.GetAttribute("faceVertexCounts").Set(np.asarray([3]*num_faces))
terrain = XFormPrim(prim_path="/World/terrain",
name="terrain",
position=position,
orientation=orientation)
UsdPhysics.CollisionAPI.Apply(terrain.prim)
# collision_api = UsdPhysics.MeshCollisionAPI.Apply(terrain.prim)
# collision_api.CreateApproximationAttr().Set("meshSimplification")
physx_collision_api = PhysxSchema.PhysxCollisionAPI.Apply(terrain.prim)
physx_collision_api.GetContactOffsetAttr().Set(0.02)
physx_collision_api.GetRestOffsetAttr().Set(0.00)
class SubTerrain:
def __init__(self, terrain_name="terrain", width=256, length=256, vertical_scale=1.0, horizontal_scale=1.0):
self.terrain_name = terrain_name
self.vertical_scale = vertical_scale
self.horizontal_scale = horizontal_scale
self.width = width
self.length = length
self.height_field_raw = np.zeros((self.width, self.length), dtype=np.int16)
| 17,478 | Python | 42.917085 | 147 | 0.655166 |
BeanSamuel/Exchange-Rate-Prediction-RL/utils/terrain_utils/create_terrain_demo.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os, sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(SCRIPT_DIR)
import omni
from omni.isaac.kit import SimulationApp
import numpy as np
import torch
simulation_app = SimulationApp({"headless": False})
from abc import abstractmethod
from omni.isaac.core.tasks import BaseTask
from omni.isaac.core.prims import RigidPrimView, RigidPrim, XFormPrim
from omni.isaac.core import World
from omni.isaac.core.objects import DynamicSphere
from omni.isaac.core.utils.prims import define_prim, get_prim_at_path
from omni.isaac.core.utils.nucleus import find_nucleus_server
from omni.isaac.core.utils.stage import add_reference_to_stage, get_current_stage
from omni.isaac.core.materials import PreviewSurface
from omni.isaac.cloner import GridCloner
from pxr import UsdPhysics, UsdLux, UsdShade, Sdf, Gf, UsdGeom, PhysxSchema
from terrain_utils import *
class TerrainCreation(BaseTask):
def __init__(self, name, num_envs, num_per_row, env_spacing, config=None, offset=None,) -> None:
BaseTask.__init__(self, name=name, offset=offset)
self._num_envs = num_envs
self._num_per_row = num_per_row
self._env_spacing = env_spacing
self._device = "cpu"
self._cloner = GridCloner(self._env_spacing, self._num_per_row)
self._cloner.define_base_env(self.default_base_env_path)
define_prim(self.default_zero_env_path)
@property
def default_base_env_path(self):
return "/World/envs"
@property
def default_zero_env_path(self):
return f"{self.default_base_env_path}/env_0"
def set_up_scene(self, scene) -> None:
self._stage = get_current_stage()
distantLight = UsdLux.DistantLight.Define(self._stage, Sdf.Path("/World/DistantLight"))
distantLight.CreateIntensityAttr(2000)
self.get_terrain()
self.get_ball()
super().set_up_scene(scene)
prim_paths = self._cloner.generate_paths("/World/envs/env", self._num_envs)
print(f"cloning {self._num_envs} environments...")
self._env_pos = self._cloner.clone(
source_prim_path="/World/envs/env_0",
prim_paths=prim_paths
)
return
def get_terrain(self):
# create all available terrain types
        num_terrains = 8
terrain_width = 12.
terrain_length = 12.
horizontal_scale = 0.25 # [m]
vertical_scale = 0.005 # [m]
num_rows = int(terrain_width/horizontal_scale)
num_cols = int(terrain_length/horizontal_scale)
        heightfield = np.zeros((num_terrains*num_rows, num_cols), dtype=np.int16)
def new_sub_terrain():
return SubTerrain(width=num_rows, length=num_cols, vertical_scale=vertical_scale, horizontal_scale=horizontal_scale)
heightfield[0:num_rows, :] = random_uniform_terrain(new_sub_terrain(), min_height=-0.2, max_height=0.2, step=0.2, downsampled_scale=0.5).height_field_raw
heightfield[num_rows:2*num_rows, :] = sloped_terrain(new_sub_terrain(), slope=-0.5).height_field_raw
heightfield[2*num_rows:3*num_rows, :] = pyramid_sloped_terrain(new_sub_terrain(), slope=-0.5).height_field_raw
heightfield[3*num_rows:4*num_rows, :] = discrete_obstacles_terrain(new_sub_terrain(), max_height=0.5, min_size=1., max_size=5., num_rects=20).height_field_raw
heightfield[4*num_rows:5*num_rows, :] = wave_terrain(new_sub_terrain(), num_waves=2., amplitude=1.).height_field_raw
heightfield[5*num_rows:6*num_rows, :] = stairs_terrain(new_sub_terrain(), step_width=0.75, step_height=-0.5).height_field_raw
heightfield[6*num_rows:7*num_rows, :] = pyramid_stairs_terrain(new_sub_terrain(), step_width=0.75, step_height=-0.5).height_field_raw
heightfield[7*num_rows:8*num_rows, :] = stepping_stones_terrain(new_sub_terrain(), stone_size=1.,
stone_distance=1., max_height=0.5, platform_size=0.).height_field_raw
vertices, triangles = convert_heightfield_to_trimesh(heightfield, horizontal_scale=horizontal_scale, vertical_scale=vertical_scale, slope_threshold=1.5)
position = np.array([-6.0, 48.0, 0])
orientation = np.array([0.70711, 0.0, 0.0, -0.70711])
add_terrain_to_stage(stage=self._stage, vertices=vertices, triangles=triangles, position=position, orientation=orientation)
def get_ball(self):
ball = DynamicSphere(prim_path=self.default_zero_env_path + "/ball",
name="ball",
translation=np.array([0.0, 0.0, 1.0]),
mass=0.5,
radius=0.2,)
def post_reset(self):
for i in range(self._num_envs):
ball_prim = self._stage.GetPrimAtPath(f"{self.default_base_env_path}/env_{i}/ball")
color = 0.5 + 0.5 * np.random.random(3)
visual_material = PreviewSurface(prim_path=f"{self.default_base_env_path}/env_{i}/ball/Looks/visual_material", color=color)
binding_api = UsdShade.MaterialBindingAPI(ball_prim)
binding_api.Bind(visual_material.material, bindingStrength=UsdShade.Tokens.strongerThanDescendants)
def get_observations(self):
pass
def calculate_metrics(self) -> None:
pass
def is_done(self) -> None:
pass
if __name__ == "__main__":
world = World(
stage_units_in_meters=1.0,
rendering_dt=1.0/60.0,
backend="torch",
device="cpu",
)
num_envs = 800
num_per_row = 80
env_spacing = 0.56*2
terrain_creation_task = TerrainCreation(name="TerrainCreation",
num_envs=num_envs,
num_per_row=num_per_row,
env_spacing=env_spacing,
)
world.add_task(terrain_creation_task)
world.reset()
while simulation_app.is_running():
if world.is_playing():
if world.current_time_step_index == 0:
world.reset(soft=True)
world.step(render=True)
else:
world.step(render=True)
simulation_app.close() | 7,869 | Python | 43.213483 | 166 | 0.650654 |
BeanSamuel/Exchange-Rate-Prediction-RL/utils/usd_utils/create_instanceable_assets.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import omni.usd
import omni.client
from pxr import UsdGeom, Sdf
def update_reference(source_prim_path, source_reference_path, target_reference_path):
stage = omni.usd.get_context().get_stage()
prims = [stage.GetPrimAtPath(source_prim_path)]
while len(prims) > 0:
prim = prims.pop(0)
prim_spec = stage.GetRootLayer().GetPrimAtPath(prim.GetPath())
reference_list = prim_spec.referenceList
refs = reference_list.GetAddedOrExplicitItems()
if len(refs) > 0:
for ref in refs:
if ref.assetPath == source_reference_path:
prim.GetReferences().RemoveReference(ref)
prim.GetReferences().AddReference(assetPath=target_reference_path, primPath=prim.GetPath())
prims = prims + prim.GetChildren()
def create_parent_xforms(asset_usd_path, source_prim_path, save_as_path=None):
""" Adds a new UsdGeom.Xform prim for each Mesh/Geometry prim under source_prim_path.
Moves material assignment to new parent prim if any exists on the Mesh/Geometry prim.
Args:
asset_usd_path (str): USD file path for asset
source_prim_path (str): USD path of root prim
save_as_path (str): USD file path for modified USD stage. Defaults to None, will save in same file.
"""
omni.usd.get_context().open_stage(asset_usd_path)
stage = omni.usd.get_context().get_stage()
prims = [stage.GetPrimAtPath(source_prim_path)]
edits = Sdf.BatchNamespaceEdit()
while len(prims) > 0:
prim = prims.pop(0)
print(prim)
if prim.GetTypeName() in ["Mesh", "Capsule", "Sphere", "Box"]:
new_xform = UsdGeom.Xform.Define(stage, str(prim.GetPath()) + "_xform")
print(prim, new_xform)
edits.Add(Sdf.NamespaceEdit.Reparent(prim.GetPath(), new_xform.GetPath(), 0))
continue
children_prims = prim.GetChildren()
prims = prims + children_prims
stage.GetRootLayer().Apply(edits)
if save_as_path is None:
omni.usd.get_context().save_stage()
else:
omni.usd.get_context().save_as_stage(save_as_path)
def convert_asset_instanceable(asset_usd_path, source_prim_path, save_as_path=None, create_xforms=True):
""" Makes all mesh/geometry prims instanceable.
Can optionally add UsdGeom.Xform prim as parent for all mesh/geometry prims.
Makes a copy of the asset USD file, which will be used for referencing.
Updates asset file to convert all parent prims of mesh/geometry prims to reference cloned USD file.
Args:
asset_usd_path (str): USD file path for asset
source_prim_path (str): USD path of root prim
save_as_path (str): USD file path for modified USD stage. Defaults to None, will save in same file.
create_xforms (bool): Whether to add new UsdGeom.Xform prims to mesh/geometry prims.
"""
if create_xforms:
create_parent_xforms(asset_usd_path, source_prim_path, save_as_path)
asset_usd_path = save_as_path
instance_usd_path = ".".join(asset_usd_path.split(".")[:-1]) + "_meshes.usd"
omni.client.copy(asset_usd_path, instance_usd_path)
omni.usd.get_context().open_stage(asset_usd_path)
stage = omni.usd.get_context().get_stage()
prims = [stage.GetPrimAtPath(source_prim_path)]
while len(prims) > 0:
prim = prims.pop(0)
if prim:
if prim.GetTypeName() in ["Mesh", "Capsule", "Sphere", "Box"]:
parent_prim = prim.GetParent()
if parent_prim and not parent_prim.IsInstance():
parent_prim.GetReferences().AddReference(assetPath=instance_usd_path, primPath=str(parent_prim.GetPath()))
parent_prim.SetInstanceable(True)
continue
children_prims = prim.GetChildren()
prims = prims + children_prims
if save_as_path is None:
omni.usd.get_context().save_stage()
else:
omni.usd.get_context().save_as_stage(save_as_path)
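# Usage sketch (hypothetical paths): make every mesh prim under /World/robot in
# robot.usd instanceable and write the modified stage to a new file:
#   convert_asset_instanceable("robot.usd", "/World/robot",
#                              save_as_path="robot_instanceable.usd", create_xforms=True)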
| 5,639 | Python | 43.761904 | 126 | 0.675829 |
BeanSamuel/Exchange-Rate-Prediction-RL/runs/Noob/20231116-200419/config.yaml | task:
name: Noob
env:
num_envs: ${resolve_default:1,${...num_envs}}
train_data: ./train.csv
test_data: ${resolve_default:'./test.csv',${...test_data}}
window_size: 10
frame_bound:
- 1850
- 2850
train:
name: PPOAgent
params:
seed: ${...seed}
model:
actor_mlp:
- 256
- 256
critic_mlp:
- 256
- 256
load_checkpoint: ${if:${...checkpoint},True,False}
load_path: ${...checkpoint}
config:
device: ${....rl_device}
save_frequency: 10
normalize_obs: true
normalize_value: false
normalize_advantage: true
horizon_length: 2048
max_epochs: ${resolve_default:200,${....max_iterations}}
mini_epochs: 6
minibatch_size: 512
tau: 0.75
gamma: 0.75
e_clip: 0.2
clip_value: false
learning_rate: 0.001
critic_loss_coef: 1
bounds_loss_coef: 10
grad_penalty_coef: 0
experiment: ''
num_envs: ''
seed: 42
torch_deterministic: false
rl_device: cpu
max_iterations: ''
test: false
checkpoint: ''
headless: false
enable_livestream: false
mt_timeout: 30
render: false
debug: false
wandb: true
save: true
profile: false
test_data: ''
| 1,197 | YAML | 19.305084 | 62 | 0.593985 |
BeanSamuel/Exchange-Rate-Prediction-RL/runs/Noob/20231116-200419/codes/actor_critic_model.py | from copy import deepcopy
import torch
from torch import nn
from torch.distributions import Categorical
from .utils import neg_log_p, eval_no_grad, Identity, RunningMeanStd
class Mlp(nn.Module):
def __init__(
self,
in_size, hidden_size, out_size=None,
activation: nn.Module = nn.ReLU(),
output_activation: nn.Module = nn.Identity()
):
super().__init__()
model = []
self.sizes = sizes = [in_size] + hidden_size
for x, y in zip(sizes[:-1], sizes[1:]):
model.append(nn.Linear(x, y))
model.append(deepcopy(activation))
if out_size is not None:
model.append(nn.Linear(sizes[-1], out_size))
self.model = nn.Sequential(*model)
self.out_act = output_activation
def forward(self, x):
return self.out_act(self.model(x))
def set_spectral_norm(self):
for i, layer in enumerate(self.model):
if isinstance(layer, nn.Linear):
self.model[i] = nn.utils.spectral_norm(layer)
class ActorCriticModel(nn.Module):
def __init__(self, config):
super().__init__()
self.obs_size = config['num_obs']
self.action_size = config['num_actions']
self.value_size = config['num_values']
self.actor = self.Actor(self.obs_size, config['actor_mlp'], self.action_size)
self.critic = self.Critic(self.obs_size, config['critic_mlp'], self.value_size)
normalize = lambda x: (x - x.mean()) / (x.std() + 1e-8)
self.normalize_obs = RunningMeanStd(self.obs_size) if config['normalize_obs'] else Identity()
self.normalize_value = RunningMeanStd(self.value_size) if config['normalize_value'] else Identity()
self.normalize_advantage = normalize if config['normalize_advantage'] else Identity()
self.preproc_advantage = lambda x: self.normalize_advantage(x.mean(dim=-1))
class Actor(nn.Module):
def __init__(self, obs_size, mlp_size, action_size):
super().__init__()
            self.mu = Mlp(obs_size, mlp_size, 9, output_activation=nn.Softmax(dim=-1))  # probabilities over the 9 discrete actions
def forward(self, x):
return self.mu(x)
class Critic(nn.Module):
def __init__(self, obs_size, mlp_size, value_size):
super().__init__()
self.value = Mlp(obs_size, mlp_size, value_size)
def forward(self, x):
return self.value(x)
@eval_no_grad
def get_action(self, obs, train=False, test=False):
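        # train=True returns the softmax action probabilities for loss computation,
        # test=True returns the greedy (argmax) action, and the default path samples
        # from the Categorical distribution and also returns the sampled action's
        # negative log-probability.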
obs = self.normalize_obs(obs)
mu = self.actor(obs)
if train:
return mu
elif test:
return torch.argmax(mu, dim=-1)
else:
action_dist = Categorical(mu)
action = action_dist.sample()
return action, -action_dist.log_prob(action)
@eval_no_grad
def get_value(self, obs, train=False):
obs = self.normalize_obs(obs)
value = self.critic(obs)
if train:
return value
else:
return self.normalize_value(value, unnorm=True)
| 3,072 | Python | 33.144444 | 107 | 0.58724 |
BeanSamuel/Exchange-Rate-Prediction-RL/runs/Noob/20231116-200419/codes/replay_buffer.py | import torch
class ReplayBuffer():
def __init__(self, buffer_size, device):
self._head = 0
self._total_count = 0
self._buffer_size = buffer_size
self._device = device
self._data_buf = None
self._sample_idx = torch.randperm(buffer_size)
self._sample_head = 0
return
def reset(self):
self._head = 0
self._total_count = 0
self._reset_sample_idx()
return
def get_buffer_size(self):
return self._buffer_size
def get_total_count(self):
return self._total_count
def store(self, data_dict):
if (self._data_buf is None):
self._init_data_buf(data_dict)
n = next(iter(data_dict.values())).shape[0]
buffer_size = self.get_buffer_size()
assert(n < buffer_size)
for key, curr_buf in self._data_buf.items():
curr_n = data_dict[key].shape[0]
assert(n == curr_n)
store_n = min(curr_n, buffer_size - self._head)
curr_buf[self._head:(self._head + store_n)] = data_dict[key][:store_n]
remainder = n - store_n
if (remainder > 0):
curr_buf[0:remainder] = data_dict[key][store_n:]
self._head = (self._head + n) % buffer_size
self._total_count += n
return
def sample(self, n):
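        # Sample n entries without replacement by walking a shuffled index
        # permutation (_sample_idx); indices wrap around the ring buffer and the
        # permutation is reshuffled once it has been fully consumed.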
total_count = self.get_total_count()
buffer_size = self.get_buffer_size()
if self.is_empty():
return None
idx = torch.arange(self._sample_head, self._sample_head + n)
idx = idx % buffer_size
rand_idx = self._sample_idx[idx]
if (total_count < buffer_size):
rand_idx = rand_idx % self._head
samples = dict()
for k, v in self._data_buf.items():
samples[k] = v[rand_idx]
self._sample_head += n
if (self._sample_head >= buffer_size):
self._reset_sample_idx()
return samples
def _reset_sample_idx(self):
buffer_size = self.get_buffer_size()
self._sample_idx[:] = torch.randperm(buffer_size)
self._sample_head = 0
return
def _init_data_buf(self, data_dict):
buffer_size = self.get_buffer_size()
self._data_buf = dict()
for k, v in data_dict.items():
v_shape = v.shape[1:]
self._data_buf[k] = torch.zeros((buffer_size,) + v_shape, device=self._device)
return
def is_empty(self):
return self._total_count == 0
class ReplayBufferCPU(ReplayBuffer):
def __init__(self, buffer_size, device):
self.sample_device = device
super().__init__(buffer_size, device='cpu')
def sample(self, n):
x = super().sample(n)
if x is not None:
for k in x.keys():
x[k] = x[k].to(self.sample_device)
return x
| 2,897 | Python | 26.339622 | 90 | 0.534001 |
BeanSamuel/Exchange-Rate-Prediction-RL/runs/Noob/20231116-200419/codes/pg_agent.py | import torch
from .ppo_agent import PPOAgent
torch.autograd.set_detect_anomaly(True)
class PGAgent(PPOAgent):
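    # REINFORCE-style policy-gradient variant of PPOAgent: the actor loss is the
    # summed neglogp weighted by the return signal passed in by PPOAgent.update,
    # and the critic loss is disabled (no clipped surrogate, no value baseline).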
def _actor_loss(self, _, neglogp, reward):
return (neglogp * reward).sum()
def _critic_loss(self, old_value, value, return_batch):
return 0
| 278 | Python | 20.461537 | 59 | 0.679856 |
BeanSamuel/Exchange-Rate-Prediction-RL/runs/Noob/20231116-200419/codes/experience.py | import gym
import torch
import numpy as np
class ExperienceBuffer:
def __init__(self, shape, env_info, device):
self.shape = tuple(shape)
self.num_obs = env_info['num_obs']
self.num_actions = env_info['num_actions']
self.num_values = env_info['num_values']
self.device = device
self.datas = {}
self.create_buffer()
def create_buffer(self):
self.add_buffer('obs', self.num_obs)
self.add_buffer('reward', self.num_values)
self.add_buffer('return', self.num_values)
self.add_buffer('value', self.num_values)
self.add_buffer('action', self.num_actions)
self.add_buffer('neglogp')
self.add_buffer('done', dtype=torch.long)
self.add_buffer('next_obs', self.num_obs)
self.add_buffer('next_value', self.num_values)
# def create_buffer(self):
# self.datas['obs'] = torch.zeros([*self.shape, self.num_obs], device=self.device)
# self.datas['reward'] = torch.zeros([*self.shape, self.num_values], device=self.device)
# self.datas['return'] = torch.zeros([*self.shape, self.num_values], device=self.device)
# self.datas['value'] = torch.zeros([*self.shape, self.num_values], device=self.device)
# self.datas['action'] = torch.zeros([*self.shape, self.num_actions], device=self.device)
# self.datas['neglogp'] = torch.zeros([*self.shape], device=self.device)
# self.datas['done'] = torch.zeros([*self.shape], dtype=torch.long, device=self.device)
# self.datas['next_obs'] = torch.zeros([*self.shape, self.num_obs], device=self.device)
# self.datas['next_value'] = torch.zeros([*self.shape, self.num_values], device=self.device)
def add_buffer(self, name, shape=(), dtype=torch.float):
shape = (shape,) if isinstance(shape, int) else tuple(shape)
self.datas[name] = torch.zeros(self.shape + shape, dtype=dtype, device=self.device)
def update_data(self, *args, **kwargs):
raise NotImplementedError
def get_data(self, *args, **kwargs):
raise NotImplementedError
class VecEnvExperienceBuffer(ExperienceBuffer):
def update_data(self, key, idx, value):
self.datas[key][idx] = value
def get_data(self):
batch_dict = {}
for k, v in self.datas.items():
s = v.shape
batch_dict[k] = v.transpose(0, 1).reshape(s[0] * s[1], *s[2:])
return batch_dict
class AsyncExperienceBuffer(ExperienceBuffer):
def __init__(self, num_actors, env_info, max_size, device):
super().__init__([max_size * 2], env_info, device)
self.size = max_size
self.run_idx = torch.zeros([num_actors], dtype=torch.long, device=self.device)
def create_buffer(self):
super().create_buffer()
self.status = torch.zeros(self.shape, dtype=torch.long, device=self.device)
self.datas['steps'] = torch.zeros([*self.shape], dtype=torch.long, device=self.device)
def update_data(self, **kwargs):
raise NotImplementedError
def pre_update_data(self, env_ids, datas: dict):
idx = (self.status == 0).nonzero().squeeze(-1)[:len(env_ids)]
self.run_idx[env_ids] = idx
for k, v in datas.items():
self.datas[k][idx] = v
self.status[idx] = -1
def post_update_data(self, env_ids, datas: dict):
idx = self.run_idx[env_ids]
for k, v in datas.items():
self.datas[k][idx] = v
self.status[self.status > 0] += 1
self.status[idx] = 1
# ToDo: check is needed
self.status[idx[datas['steps'] <= 0]] = 0
def full(self):
return torch.sum(self.status > 0) >= self.size
def get_data(self):
if not self.full():
raise
idx = self.status.topk(self.size, sorted=False)[1]
data = {k: v[idx] for k, v in self.datas.items()}
self.status[idx] = 0
return data
if __name__ == '__main__':
T = torch.Tensor
TL = lambda x: T(x).to(dtype=torch.long)
Z = torch.zeros
R = torch.rand
env_info = {'action_space': Z(2), 'observation_space': Z(3), 'value_size': 1}
buf = AsyncExperienceBuffer(5, env_info, 5, 'cpu')
buf.pre_update_data(TL([1, 3]), {'obs': T([[1, 1, 1], [2, 2, 2]])})
buf.pre_update_data(TL([2, 0]), {'obs': T([[3, 3, 3], [4, 4, 4]])})
buf.post_update_data(TL([2, 0]), {'action': T([[3, 3], [4, 4]])})
buf.post_update_data(TL([1, 3]), {'action': T([[1, 1], [2, 2]])})
buf.pre_update_data(TL([2, 0]), {'obs': T([[3, 3, 3], [4, 4, 4]])})
buf.post_update_data(TL([2, 0]), {'action': T([[3, 3], [4, 4]])})
print(buf.run_idx)
print(buf.datas['obs'], buf.datas['action'])
print(buf.status)
print(buf.get_data())
print(buf.status)
| 4,782 | Python | 38.204918 | 100 | 0.587411 |
BeanSamuel/Exchange-Rate-Prediction-RL/runs/Noob/20231116-200419/codes/utils.py | import numpy as np
import torch
from torch import nn
from utils.torch_utils import to_torch_size
def eval_no_grad(func):
def _eval_no_grad(self, *args, **kwargs):
if not self.training:
with torch.no_grad():
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return _eval_no_grad
class Identity(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, x, **kwargs):
return x
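# Negative log-likelihood of x under a diagonal Gaussian with the given mean and
# log standard deviation:
#   -log p(x) = 0.5 * sum(((x - mean) / exp(log_std)) ** 2)
#               + 0.5 * log(2 * pi) * dim(x) + sum(log_std)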
def neg_log_p(x, mean, log_std):
return 0.5 * (((x - mean) / torch.exp(log_std)) ** 2).sum(dim=-1) \
+ 0.5 * np.log(2.0 * np.pi) * x.size()[-1] \
+ log_std.sum(dim=-1)
class RunningMeanStd(nn.Module):
def __init__(self, in_size, eps=1e-05):
super().__init__()
self.in_size = to_torch_size(in_size)
self.eps = eps
self.register_buffer("mean", torch.zeros(in_size, dtype=torch.float64))
self.register_buffer("var", torch.ones(in_size, dtype=torch.float64))
self.register_buffer("count", torch.ones((), dtype=torch.float64))
def _update(self, batch_mean, batch_var, batch_count):
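        # Merge batch statistics into the running mean/variance with the parallel
        # (Chan et al.) update: combine the two second moments m_a and m_b plus a
        # correction term proportional to the squared difference of the means.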
delta = batch_mean - self.mean
m_a = self.var * self.count
m_b = batch_var * batch_count
m2 = m_a + m_b + delta**2 * self.count * batch_count / (self.count + batch_count)
self.count += batch_count
self.mean[:] = self.mean + delta * batch_count / self.count
self.var[:] = m2 / self.count
def forward(self, x, unnorm=False):
if x.nelement() == 0:
return x
if self.training and not unnorm:
axis = list(range(x.ndim - len(self.in_size)))
mean = x.mean(axis)
var = x.var(axis, correction=0)
count = x.shape[:-1].numel()
self._update(mean, var, count)
if unnorm:
y = torch.clamp(x, min=-5.0, max=5.0)
y = torch.sqrt(self.var.float() + self.eps) * y + self.mean.float()
else:
y = (x - self.mean.float()) / torch.sqrt(self.var.float() + self.eps)
y = torch.clamp(y, min=-5.0, max=5.0)
return y
| 2,193 | Python | 29.472222 | 89 | 0.545372 |
BeanSamuel/Exchange-Rate-Prediction-RL/runs/Noob/20231116-200419/codes/dataset.py | import torch
class Dataset:
def __init__(self, batch_size, minibatch_size, device):
self.batch_size = batch_size
self.minibatch_size = minibatch_size
self.device = device
# self.size = self.batch_size // self.minibatch_size
self._idx_buf = torch.randperm(batch_size)
def update(self, datas):
self.datas = datas
def __len__(self):
return self.batch_size // self.minibatch_size
def __getitem__(self, idx):
start = idx * self.minibatch_size
end = (idx + 1) * self.minibatch_size
sample_idx = self._idx_buf[start:end]
data_dict = {}
for k, v in self.datas.items():
if v is not None:
data_dict[k] = v[sample_idx].detach()
if end >= self.batch_size:
self._shuffle_idx_buf()
return data_dict
def _shuffle_idx_buf(self):
self._idx_buf[:] = torch.randperm(self.batch_size)
| 969 | Python | 26.714285 | 60 | 0.55934 |
BeanSamuel/Exchange-Rate-Prediction-RL/runs/Noob/20231116-200419/codes/ppo_agent.py | import os
import shutil
import time
import torch
from torch import optim
from torch.distributions import Categorical
from .utils import neg_log_p
from .dataset import Dataset
from .experience import VecEnvExperienceBuffer
from .actor_critic_model import ActorCriticModel
from utils.runner import Runner
torch.autograd.set_detect_anomaly(True)
class PPOAgent:
def __init__(self, params, env):
print(f'\n------------------------------------ {self.__class__.__name__} ------------------------------------')
self.config = config = params['config']
self.device = config.get('device', 'cuda:0')
# save
self.save_freq = config.get('save_frequency', 0)
# normalize
self.normalize_obs = self.config['normalize_obs']
self.normalize_value = self.config.get('normalize_value', False)
self.normalize_advantage = config['normalize_advantage']
# learning
self.lr = config['learning_rate']
self.num_actors = env.num_envs
self.horizon_length = config['horizon_length']
self.seq_len = self.config.get('seq_length', 4)
self.max_epochs = self.config.get('max_epochs', -1)
self.mini_epochs_num = self.config['mini_epochs']
self.minibatch_size = self.config.get('minibatch_size')
self.batch_size = self.horizon_length * self.num_actors
assert (self.batch_size % self.minibatch_size == 0)
self.e_clip = config['e_clip']
self.clip_action = self.config.get('clip_actions', True)
self.clip_value = config['clip_value']
self.tau = self.config['tau']
self.gamma = self.config['gamma']
self.critic_loss_coef = config['critic_loss_coef']
self.bounds_loss_coef = self.config.get('bounds_loss_coef', None)
# env
self.env = env
self.build_env_info()
# model
self.build_model(params['model'])
self.optimizer = optim.AdamW(self.model.parameters(), self.lr, eps=1e-08, weight_decay=0)
# buffers
self.dataset = Dataset(self.batch_size, self.minibatch_size, self.device)
self.experience_buffer = VecEnvExperienceBuffer([self.horizon_length, self.num_actors], self.env_info, self.device)
# counter
self.epoch_num = 0
self.env.agent = self
def build_env_info(self):
self.env_info = dict(
num_obs=self.env.num_obs,
num_actions=self.env.num_actions,
num_values=self.env.num_values,
)
def build_model(self, config):
model = config.get('model', ActorCriticModel)
config['normalize_obs'] = self.normalize_obs
config['normalize_value'] = self.normalize_value
config['normalize_advantage'] = self.normalize_advantage
config.update(self.env_info)
self.model = model(config).to(self.device)
print(self.model)
def set_eval(self):
self.model.eval()
def set_train(self):
self.model.train()
def preproc_action(self, action):
return action.clone()
def env_step(self, action):
_action = self.preproc_action(action)
obs, reward, done, infos = self.env.step(_action)
obs = obs.to(self.device)
reward = reward.to(self.device)
done = done.to(self.device)
for k in infos.keys():
if isinstance(infos[k], torch.Tensor):
infos[k] = infos[k].to(self.device)
return obs, reward, done, infos
def env_reset_done(self):
obs = self.env.reset_done()
return obs.to(self.device)
def play_steps(self):
for n in range(self.horizon_length):
obs = self.env_reset_done()
self.experience_buffer.update_data('obs', n, obs)
value = self.model.get_value(obs)
action, neglogp = self.model.get_action(obs)
obs, reward, done, infos = self.env_step(action)
next_value = self.model.get_value(obs)
self.experience_buffer.update_data('value', n, value)
self.experience_buffer.update_data('action', n, action)
self.experience_buffer.update_data('neglogp', n, neglogp)
self.experience_buffer.update_data('reward', n, reward)
self.experience_buffer.update_data('next_obs', n, obs)
self.experience_buffer.update_data('done', n, done)
self.experience_buffer.update_data('next_value', n, next_value)
self.post_step(n, infos)
mb_done = self.experience_buffer.datas['done']
mb_value = self.experience_buffer.datas['value']
mb_next_value = self.experience_buffer.datas['next_value']
mb_reward = self.experience_buffer.datas['reward']
mb_value, mb_return, mb_adv = self.compute_return(mb_done, mb_value, mb_reward, mb_next_value)
self.experience_buffer.datas['value'] = mb_value
self.experience_buffer.datas['return'] = mb_return
self.experience_buffer.datas['advantage'] = mb_adv
batch_dict = self.experience_buffer.get_data()
return batch_dict
def train_epoch(self):
self.set_eval()
play_time_start = time.time()
batch_dict = self.play_steps()
play_time_end = time.time()
update_time_start = time.time()
self.set_train()
self.curr_frames = self.batch_size
self.dataset.update(batch_dict)
for mini_ep in range(0, self.mini_epochs_num):
for i in range(len(self.dataset)):
self.update(self.dataset[i])
self.post_epoch()
update_time_end = time.time()
play_time = play_time_end - play_time_start
update_time = update_time_end - update_time_start
total_time = update_time_end - play_time_start
return play_time, update_time, total_time
def train(self):
self.last_mean_rewards = -100500
total_time = 0
self.frame = 0
while True:
self.epoch_num += 1
play_time, update_time, epoch_time = self.train_epoch()
total_time += epoch_time
scaled_time = epoch_time
scaled_play_time = play_time
curr_frames = self.curr_frames
self.frame += curr_frames
fps_step = curr_frames / scaled_play_time
fps_total = curr_frames / scaled_time
print(f'fps step: {fps_step:.1f} fps total: {fps_total:.1f}')
if self.save_freq > 0:
if self.epoch_num % self.save_freq == 0:
Runner.save_model('Epoch' + str(self.epoch_num))
if self.epoch_num > self.max_epochs:
print('MAX EPOCHS NUM!')
return
def test(self):
self.set_eval()
score = self.env.test()
print('total profit:', score)
def post_step(self, n, infos):
pass
def post_epoch(self):
Runner.logger.upload()
if self.epoch_num % 10 == 0:
self.env.test()
def compute_return(self, done, value, reward, next_value):
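        # Generalized Advantage Estimation (GAE): delta_t = r_t + gamma * V(s_{t+1}) - V(s_t),
        # A_t = delta_t + gamma * tau * (1 - done_t) * A_{t+1}, accumulated backwards over the
        # horizon; returns = V + A, then values, returns and advantages are normalized.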
last_gae_lam = 0
adv = torch.zeros_like(reward)
done = done.float()
for t in reversed(range(self.horizon_length)):
not_done = 1.0 - done[t]
not_done = not_done.unsqueeze(1)
delta = reward[t] + self.gamma * next_value[t] - value[t]
last_gae_lam = delta + self.gamma * self.tau * not_done * last_gae_lam
adv[t] = last_gae_lam
returns = self.model.normalize_value(value + adv)
value = self.model.normalize_value(value)
adv = self.model.preproc_advantage(adv)
return value, returns, adv
def update(self, input_dict):
obs = input_dict['obs']
action = input_dict['action']
old_value = input_dict['value']
old_neglogp = input_dict['neglogp']
advantage = input_dict['advantage']
returns = input_dict['return']
mu = self.model.get_action(obs, train=True)
neglogp = -Categorical(mu).log_prob(action.squeeze(-1))
value = self.model.get_value(obs, train=True)
# print(mu.shape, action.shape)
# print(neglogp.shape)
# print(torch.exp(old_neglogp[0] - neglogp[0]))
a_loss = self._actor_loss(old_neglogp, neglogp, advantage)
c_loss = self._critic_loss(old_value, value, returns)
b_loss = self._bound_loss(mu)
loss = a_loss + self.critic_loss_coef * c_loss + self.bounds_loss_coef * b_loss
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
Runner.logger.log({
'loss/total': loss,
'loss/actor': a_loss,
'loss/critic': c_loss,
'value/': value,
})
def log_results(self, **kwargs):
pass
def _actor_loss(self, old_neglogp, neglogp, advantage):
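        # PPO clipped surrogate: ratio = pi_new / pi_old recovered from the stored
        # negative log-probs (capped at 2 for stability); take the pessimistic
        # maximum of the unclipped and clipped advantage-weighted terms.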
ratio = torch.exp(old_neglogp - neglogp).clamp_max(2) # prevent too large loss
surr1 = advantage * ratio
surr2 = advantage * torch.clamp(ratio, 1.0 - self.e_clip, 1.0 + self.e_clip)
a_loss = torch.max(-surr1, -surr2)
return a_loss.mean()
def _critic_loss(self, old_value, value, return_batch):
if self.clip_value:
value_pred_clipped = old_value + (value - old_value).clamp(-self.e_clip, self.e_clip)
value_losses = (value - return_batch) ** 2
value_losses_clipped = (value_pred_clipped - return_batch)**2
c_loss = torch.max(value_losses, value_losses_clipped)
else:
c_loss = (return_batch - value) ** 2
return c_loss.mean()
def _bound_loss(self, mu):
if self.bounds_loss_coef is not None:
soft_bound = 1.0
mu_loss_high = torch.maximum(mu - soft_bound, torch.tensor(0, device=self.device)) ** 2
mu_loss_low = torch.minimum(mu + soft_bound, torch.tensor(0, device=self.device)) ** 2
b_loss = (mu_loss_low + mu_loss_high).sum(axis=-1)
else:
b_loss = 0
return b_loss.mean()
def save(self):
return self.model.state_dict()
def load(self, datas):
self.model.load_state_dict(datas)
| 10,238 | Python | 33.708474 | 123 | 0.582926 |
DimensionLab/fmmr-water-tank/server.py | import json
from typing import List, Dict, Union
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from siml.siml_inferencer import WaterTankSimulator
class WaterTankSimulatorParameters(BaseModel):
inlet_velocity: float
class SimulatorSettings(BaseModel):
parameters: Union[WaterTankSimulatorParameters, None] = None
eco_mode: bool = False
class SimulatorInput(BaseModel):
parameters: Union[WaterTankSimulatorParameters, None] = None
resolution: List[int] = [32, 32, 32]
app = FastAPI()
FAKE_SIMULATORS_DB = {
"simulator123": WaterTankSimulator
}
LOADED_SIMULATORS = {}
@app.post("/init_simulator/{id}")
def init_simulator(id: str, settings: SimulatorSettings):
if id not in FAKE_SIMULATORS_DB:
raise HTTPException(status_code=404, detail="Simulator not found")
simulator_loader = FAKE_SIMULATORS_DB.get(id)
LOADED_SIMULATORS[id] = simulator_loader()
LOADED_SIMULATORS[id].eco = settings.eco_mode
LOADED_SIMULATORS[id].load_geometry()
LOADED_SIMULATORS[id].load_inferencer()
return {"message": "Simulator loaded."}
@app.post("/simulate/{id}")
def simulate(id: str, props: SimulatorInput):
if id not in LOADED_SIMULATORS:
raise HTTPException(status_code=404, detail="Simulator not loaded")
simulator = LOADED_SIMULATORS[id]
json_output = simulator.run_inference(props.parameters.inlet_velocity, props.resolution)
return json_output
# kept for testing the endpoint
@app.get("/hello")
def read_root():
return {"hello": "world"}
| 1,557 | Python | 24.129032 | 92 | 0.725755 |
DimensionLab/fmmr-water-tank/README.md | # Water tank simulator

Project commissioned for the Faculty of Materials, Metallurgy and Recycling (FMMR) at the Technical University of Košice.
This simulator mainly showcases the capabilities of a parametrized AI-based physics simulator leveraging scientific deep learning methods (physics-informed neural networks - PINNs). The simple geometry has 1 inlet at the top and 2 outlets at the bottom.
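A minimal sketch of driving the bundled FastAPI service (`server.py`) once it is running, assuming a local `uvicorn server:app` instance on port 8000; the `simulator123` id and both endpoints come from `server.py`:

```python
import requests

BASE = "http://localhost:8000"  # assumed local uvicorn instance

# Load the geometry and the PINN inferencer for the registered simulator
requests.post(f"{BASE}/init_simulator/simulator123", json={"eco_mode": False})

# Query the flow field for a given inlet velocity on a 32x32x32 voxel grid
resp = requests.post(
    f"{BASE}/simulate/simulator123",
    json={"parameters": {"inlet_velocity": 1.5}, "resolution": [32, 32, 32]},
)
print(resp.status_code)  # the body holds the flattened (u, v, w, alpha) field as JSON
```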
| 509 | Markdown | 62.749992 | 251 | 0.821218 |
DimensionLab/fmmr-water-tank/siml/siml_inferencer.py | import sys, os
import json
import torch
import modulus
from sympy import Symbol, Eq, Abs, tanh
import numpy as np
import logging
from typing import List, Dict, Union
from pathlib import Path
from modulus.hydra.utils import compose
from modulus.hydra import to_yaml, to_absolute_path, instantiate_arch, ModulusConfig
from modulus.models.fully_connected import FullyConnectedArch
from modulus.domain.inferencer import (
OVVoxelInferencer,
)
from modulus.key import Key
from modulus.eq.pdes.navier_stokes import NavierStokes
from modulus.eq.pdes.basic import NormalDotVec
from modulus.geometry.tessellation import Tessellation
from json import JSONEncoder
from water_tank.constants import bounds
from water_tank.src.geometry import WaterTank
class NumpyArrayEncoder(JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return JSONEncoder.default(self, obj)
# Process ['u', 'v', 'w'] output, add 'a' as "alpha" and flatten to 1D array
def flatten_to_uvwa(data):
lst = []
for i in range(len(data['u'])):
for index, item in enumerate(['u', 'v', 'w']):
pts = data[item][i]
lst.append(float(pts))
if item == 'w':
lst.append(float(1))
return lst
class WaterTankSimulator(object):
"""Water tank Inference runner for OV scenario
Args:
cfg (ModulusConfig): Parsed Modulus config
"""
def __init__(
self,
mask_value: float = -100,
):
logging.getLogger().addHandler(logging.StreamHandler())
self.cfg = compose(config_path="../water_tank/conf", config_name="config_eval", job_name="water_tank_inference")
print(to_yaml(self.cfg))
##############################
# Nondimensionalization Params
##############################
# fluid params
# Water at 20°C (https://wiki.anton-paar.com/en/water/)
# https://en.wikipedia.org/wiki/Viscosity#Kinematic_viscosity
self.nu = 1.787e-06 # m2 * s-1
self.inlet_vel = Symbol("inlet_velocity")
self.rho = 1
self.scale = 1.0
self._eco = False
self._inferencer = None
self.bounds = bounds
self.mask_value = mask_value
@property
def eco(self):
return self._eco
@eco.setter
def eco(self, value: bool):
self._eco = value
if self._inferencer:
self._inferencer.eco = value
def load_inferencer(self, checkpoint_dir: Union[str, None] = None):
"""Create Modulus Water Tank inferencer object. This can take time since
it will initialize the model
Parameters
----------
checkpoint_dir : Union[str, None], optional
Directory to modulus checkpoint
"""
# make list of nodes to unroll graph on
ns = NavierStokes(nu=self.nu * self.scale, rho=self.rho, dim=3, time=False)
normal_dot_vel = NormalDotVec(["u", "v", "w"])
# self.progress_bar.value = 0.025
equation_nodes = (
ns.make_nodes()
+ normal_dot_vel.make_nodes()
)
# determine inputs outputs of the network
input_keys = [Key("x"), Key("y"), Key("z")]
input_keys += [Key("inlet_velocity")]
output_keys = [Key("u"), Key("v"), Key("w"), Key("p")]
# select the network and the specific configs
flow_net = FullyConnectedArch(
input_keys=input_keys,
output_keys=output_keys,
)
self.flow_nodes = equation_nodes + [
flow_net.make_node(name="flow_network", jit=self.cfg.jit)
]
invar_keys = [
Key.from_str("x"),
Key.from_str("y"),
Key.from_str("z"),
Key.from_str("inlet_velocity"),
]
outvar_keys = [
Key.from_str("u"),
Key.from_str("v"),
Key.from_str("w"),
Key.from_str("p"),
]
self._inferencer = OVVoxelInferencer(
nodes=self.flow_nodes,
input_keys=invar_keys,
output_keys=outvar_keys,
mask_value=self.mask_value,
requires_grad=False,
eco=False,
# progress_bar=self.progress_bar, # TODO: implement setting progress
)
# Load checkpointed model
if checkpoint_dir is not None:
absolute_checkpoint_dir = Path(__file__).parent / checkpoint_dir
if absolute_checkpoint_dir.resolve().is_dir():
self._inferencer.load_models(absolute_checkpoint_dir.resolve())
else:
print("Could not find checkpointed model")
# Set eco
self._inferencer.eco = self.eco
def load_geometry(self):
# normalize meshes
def normalize_mesh(mesh, center, scale):
mesh = mesh.translate([-c for c in center])
mesh = mesh.scale(scale)
return mesh
stl_path = Path(self.data_path) / Path("stl_files")
self.interior_mesh = Tessellation.from_stl(
Path(stl_path) / Path("water_tank_closed.stl"), airtight=True
)
center = (0, 0, 0)
scale = 1.0
self.interior_mesh = normalize_mesh(self.interior_mesh, center, scale)
def run_inference(
self,
inlet_velocity: float,
resolution: List[int] = [256, 256, 256],
) -> Dict[str, np.array]:
"""Runs inference for Water Tank
Args:
resolution (List[int], optional): Voxel resolution. Defaults to [256, 256, 256].
Returns:
Dict[str, np.array]: Predicted output variables
"""
if self._inferencer is None:
print("Loading inferencer")
self.load_inferencer(checkpoint_dir="./checkpoints")
print("Loading geometry")
self.load_geometry()
# Eco mode settings
if self._inferencer.eco:
batch_size = 512
memory_fraction = 0.1
else:
vram_gb = torch.cuda.get_device_properties(0).total_memory / 10**9
batch_size = int((vram_gb // 6) * 16 * 1024)
memory_fraction = 1.0
mask_fn = (
lambda x, y, z: self.interior_mesh.sdf({"x": x, "y": y, "z": z}, {})["sdf"]
< 0
)
sp_array = np.ones((np.prod(resolution), 1))
specific_params = {
"inlet_velocity": inlet_velocity * sp_array,
}
# Set up the voxel sample domain
self._inferencer.setup_voxel_domain(
bounds=self.bounds,
npoints=resolution,
invar=specific_params,
batch_size=batch_size,
mask_fn=mask_fn,
)
# Perform inference
invar, predvar = self._inferencer.query(memory_fraction)
return self._to_json(predvar)
@property
def data_path(self):
data_dir = Path(os.path.dirname(__file__)) / Path("../data")
return str(data_dir)
# Process ['u', 'v', 'w'] output, add 'a' as "alpha" and flatten to 1D array
def _flatten_to_uvwa(self, data):
lst = []
for i in range(len(data['u'])):
for index, item in enumerate(['u', 'v', 'w']):
pts = data[item][i]
lst.append(float(pts))
if item == 'w':
lst.append(float(1))
return lst
def _to_json(self, data):
data['u'] = np.reshape(data['u'], (-1, 1))
data['v'] = np.reshape(data['v'], (-1, 1))
data['w'] = np.reshape(data['w'], (-1, 1))
numpyData = {"array": [], "uvw": self._flatten_to_uvwa(data)}
return json.dumps(numpyData, cls=NumpyArrayEncoder)
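# Usage sketch mirroring server.py (checkpoint path and values are assumptions):
#   sim = WaterTankSimulator()
#   sim.eco = False
#   sim.load_geometry()
#   sim.load_inferencer(checkpoint_dir="./checkpoints")
#   payload = sim.run_inference(inlet_velocity=1.5, resolution=[32, 32, 32])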
# def run_inference(self):
# self.inf_button.text = "Running Inference..."
# print("Water tank inferencer started")
# if self.simulator_runner.eco:
# resolution_x = 64
# resolution_y = 32
# resolution_z = 64
# else:
# resolution_x = 128
# resolution_y = 128
# resolution_z = 128
# if (resolution_x, resolution_y, resolution_z) != self.resolution:
# print(
# f"Initializing inferencer with a resolution of {resolution_x}*{resolution_y}*{resolution_z}"
# )
# self.resolution = [resolution_x, resolution_y, resolution_z]
# print(
# f"Will run inferencing for inlet_velocity={self.inlet_velocity}"
# )
# pred_vars = self.simulator_runner.run_inference(
# inlet_velocity=self.inlet_velocity,
# resolution=list(self.resolution),
# )
# shape = tuple(self.resolution)
# u = pred_vars["u"].reshape(shape)
# v = pred_vars["v"].reshape(shape)
# w = pred_vars["w"].reshape(shape)
# velocity = np.stack([u, v, w], axis=-1)
# if velocity.dtype != np.float32:
# velocity = velocity.astype(np.float32)
# if velocity.shape != shape + (3,):
# raise RuntimeError(f"expected shape: {shape + (3,)}; got: {velocity.shape}")
# # Change to z axis first for VTK input (not sure why)
# # Tensor comes out of inferencer in ij index form
# velocity = np.ascontiguousarray(velocity.transpose(2, 1, 0, 3))
# self.inf_progress.value = 0.95
# np.seterr(invalid="ignore")
# mask = np.where(velocity == self.simulator_runner.mask_value)
# velocity[mask] = 0.0
# velmag = np.linalg.norm(velocity, axis=3)
# # velmag = velmag / np.amax(velmag)
# minval = np.amin(velmag)
# maxval = np.amax(velmag)
# print("Test", maxval, minval)
# self._velocity = velocity
# self._velmag = velmag
# # self._mask = spatial_mask
# self._vel_mask = mask
# self._bounds = np.array(self.simulator_runner.bounds).flatten()
# print("WaterTankScenario inference ended")
# self._eval_complete = True
# self.inf_progress.value = 1.0
# self.inf_button.text = "Inference"
| 10,248 | Python | 31.536508 | 120 | 0.556401 |
DimensionLab/fmmr-water-tank/water_tank/constants.py | # Extension constants
bounds = [[-2, 2], [-1, 1], [-3, 3]]
resolution = (200, 200, 200)
| 88 | Python | 21.249995 | 36 | 0.556818 |
DimensionLab/fmmr-water-tank/water_tank/extension.py | # import os
# import torch
# import shutil
# import asyncio
# import traceback
# import omni.ext
# import omni.usd
# import omni.ui as ui
# import numpy as np
# from pathlib import Path
# from modulus.hydra.utils import compose
# from modulus_ext.ui.scenario import (
# ModulusOVScenario,
# ModulusOVButton,
# ModulusOVFloatSlider,
# ModulusOVIntSlider,
# ModulusOVToggle,
# ModulusOVRow,
# ModulusOVText,
# ModulusOVProgressBar,
# )
# from .visualizer import Visualizer
# from .water_tank_runner import ModulusWaterTankRunner
# from .constants import bounds
# from .src.water_tank import inlet_vel_range
# class WaterTankScenario(ModulusOVScenario):
# def __init__(self):
# self._init_task = asyncio.ensure_future(self.deferred_init())
# async def deferred_init(self):
# super().__init__(name="Water tank simulator Omniverse Extension")
# # Need to be a few frames in before init can occur.
# # This is required for auto-loading of the extension
# for i in range(15):
# await omni.kit.app.get_app().next_update_async()
# self.solver_train_initialized = False
# self.solver_eval_initialized = False
# self._eval_complete = False
# self.resolution = [128, 128, 128]
# vram_gb = torch.cuda.get_device_properties(0).total_memory / 10**9
# eco = vram_gb < 13 # 12 Gb and below GPUs, turn on eco mode
# self.inlet_velocity = 1.5
# self.visualizer = Visualizer()
# self._usd_context = omni.usd.get_context()
# if self._usd_context.is_new_stage():
# self.load_template()
# param_text = ModulusOVText(
# desc="Input Parameters",
# )
# self.add(param_text)
# height_slider = ModulusOVFloatSlider(
# name="Inlet Velocity",
# desc="Inlet velocity from the top for Inference",
# default_value=self.inlet_velocity,
# bounds=inlet_vel_range,
# update_func=self.update_inlet_velocity,
# )
# self.add(height_slider)
# # Inference controls
# self.inf_button = ModulusOVButton(
# name="Inference",
# desc="Perform Inference",
# update_func=self.run_inference,
# )
# self.inf_button.run_in_main_thread = False
# self.add(self.inf_button)
# self.inf_progress = ModulusOVProgressBar(
# desc="Inference Progress", default_value=0.0
# )
# self.inf_progress.inference_scale = 0.7
# self.add(self.inf_progress)
# # Visualization actions
# isosurfaceButton = ModulusOVButton(
# name="Isosurface",
# desc="Generate Isosurface Visualization",
# update_func=self.generate_isosurface,
# )
# streamlineButton = ModulusOVButton(
# name="Streamline",
# desc="Generate Streamline Visualization",
# update_func=self.generate_streamlines,
# )
# sliceButton = ModulusOVButton(
# name="Slice",
# desc="Generate Slice Visualization",
# update_func=self.generate_slices,
# )
# button_row = ModulusOVRow(
# elements=[isosurfaceButton, streamlineButton, sliceButton]
# )
# self.add(button_row)
# # Isosuface controls
# control_text = ModulusOVText(
# desc="Isosurface Controls",
# )
# self.add(control_text)
# slider = ModulusOVFloatSlider(
# name="Isovalue",
# desc="Isosurface visualization isovalue",
# default_value=0.001,
# bounds=(0.001, 1.0),
# update_func=self.update_isovalue,
# )
# self.add(slider)
# # Streamline controls
# control_text = ModulusOVText(
# desc="Streamline Controls",
# )
# self.add(control_text)
# slider = ModulusOVIntSlider(
# name="Streamline Count",
# desc="Streamline visualization count",
# default_value=200,
# bounds=(1, 400),
# update_func=self.update_streamline_count,
# )
# self.add(slider)
# slider = ModulusOVFloatSlider(
# name="Streamline Step Size",
# desc="Step Size used for Calculating Streamlines",
# default_value=0.01,
# bounds=(0.001, 0.1),
# update_func=self.update_streamline_step_size,
# )
# self.add(slider)
# slider = ModulusOVIntSlider(
# name="Streamline Step Count",
# desc="Number of Integration Steps to Calculate Streamlines",
# default_value=1000,
# bounds=(1, 2000),
# update_func=self.update_streamline_step_count,
# )
# self.add(slider)
# slider = ModulusOVFloatSlider(
# name="Streamline Radius",
# desc="Radius of Streamline Tubes",
# default_value=0.02,
# bounds=(0.0001, 0.1),
# update_func=self.update_streamline_radius,
# )
# self.add(slider)
# # Slice controls
# control_text = ModulusOVText(
# desc="Slice Controls",
# )
# self.add(control_text)
# slider = ModulusOVFloatSlider(
# name="Slice X Offset",
# desc="Contour slice X offset from domain center",
# default_value=0.0,
# bounds=[bounds[0][0], bounds[0][1]],
# update_func=self.update_slice_x_offset,
# )
# self.add(slider)
# slider = ModulusOVFloatSlider(
# name="Slice Y Offset",
# desc="Contour slice Y offset from domain center",
# default_value=0.0,
# bounds=[bounds[1][0], bounds[1][1]],
# update_func=self.update_slice_y_offset,
# )
# self.add(slider)
# slider = ModulusOVFloatSlider(
# name="Slice Z Offset",
# desc="Contour slice Z offset from domain center",
# default_value=0.0,
# bounds=[bounds[2][0], bounds[2][1]],
# update_func=self.update_slice_z_offset,
# )
# self.add(slider)
# eco_toggle = ModulusOVToggle(
# name="Eco Mode",
# desc="For cards with limited memory",
# default_value=eco,
# update_func=self.toggle_eco,
# )
# self.add(eco_toggle)
# self.register()
# cfg = compose(config_name="config", config_path="conf", job_name="WaterTank")
# self.simulator_runner = ModulusWaterTankRunner(
# cfg, progress_bar=self.inf_progress
# )
# self.simulator_runner.eco = eco
# def load_template(self):
# print("loading template")
# usd_context = omni.usd.get_context()
# template_file = Path(os.path.dirname(__file__)) / Path(
# "../data/water_tank_template.usda"
# )
# self.template_temp_file = str(
# Path(os.path.dirname(__file__))
# / Path("../data/water_tank_template_temp.usda")
# )
# shutil.copyfile(template_file, self.template_temp_file)
# usd_context.open_stage(self.template_temp_file)
# def toggle_eco(self, value):
# print(f"Eco mode set to {value}")
# self.simulator_runner.eco = value
# def run_inference(self):
# self.inf_button.text = "Running Inference..."
# print("Water tank inferencer started")
# if self.simulator_runner.eco:
# resolution_x = 64
# resolution_y = 32
# resolution_z = 64
# else:
# resolution_x = 128
# resolution_y = 128
# resolution_z = 128
# if (resolution_x, resolution_y, resolution_z) != self.resolution:
# print(
# f"Initializing inferencer with a resolution of {resolution_x}*{resolution_y}*{resolution_z}"
# )
# self.resolution = [resolution_x, resolution_y, resolution_z]
# print(
# f"Will run inferencing for inlet_velocity={self.inlet_velocity}"
# )
# pred_vars = self.simulator_runner.run_inference(
# inlet_velocity=self.inlet_velocity,
# resolution=list(self.resolution),
# )
# shape = tuple(self.resolution)
# u = pred_vars["u"].reshape(shape)
# v = pred_vars["v"].reshape(shape)
# w = pred_vars["w"].reshape(shape)
# velocity = np.stack([u, v, w], axis=-1)
# if velocity.dtype != np.float32:
# velocity = velocity.astype(np.float32)
# if velocity.shape != shape + (3,):
# raise RuntimeError(f"expected shape: {shape + (3,)}; got: {velocity.shape}")
# # Change to z axis first for VTK input (not sure why)
# # Tensor comes out of inferencer in ij index form
# velocity = np.ascontiguousarray(velocity.transpose(2, 1, 0, 3))
# self.inf_progress.value = 0.95
# np.seterr(invalid="ignore")
# mask = np.where(velocity == self.simulator_runner.mask_value)
# velocity[mask] = 0.0
# velmag = np.linalg.norm(velocity, axis=3)
# # velmag = velmag / np.amax(velmag)
# minval = np.amin(velmag)
# maxval = np.amax(velmag)
# print("Test", maxval, minval)
# self._velocity = velocity
# self._velmag = velmag
# # self._mask = spatial_mask
# self._vel_mask = mask
# self._bounds = np.array(self.simulator_runner.bounds).flatten()
# print("WaterTankScenario inference ended")
# self._eval_complete = True
# self.inf_progress.value = 1.0
# self.inf_button.text = "Inference"
# def update_vis_data(self):
# if not all(v is not None for v in [self._velocity, self._velmag, self._bounds]):
# return
# self.visualizer.update_data(
# self._velocity, self._velmag, self._bounds, self._vel_mask, self.resolution
# )
# def update_inlet_velocity(self, value: float):
# self.inlet_velocity = value
# def update_isovalue(self, isovalue):
# print(f"Updating isovalue: {isovalue}")
# self.visualizer.parameters.isovalue = isovalue
# self.visualizer.update_generated()
# def update_streamline_count(self, streamline_count):
# print(f"Updating streamline_count: {streamline_count}")
# self.visualizer.parameters.streamline_count = streamline_count
# self.visualizer.update_generated()
# def update_streamline_step_size(self, streamline_step_size):
# print(f"Updating streamline_step_size: {streamline_step_size}")
# self.visualizer.parameters.streamline_step_size = streamline_step_size
# self.visualizer.update_generated()
# def update_streamline_step_count(self, streamline_step_count):
# print(f"Updating streamline_step_count: {streamline_step_count}")
# self.visualizer.parameters.streamline_step_count = streamline_step_count
# self.visualizer.update_generated()
# def update_streamline_radius(self, streamline_radius):
# print(f"Updating streamline_radius: {streamline_radius}")
# self.visualizer.parameters.streamline_radius = streamline_radius
# self.visualizer.update_generated()
# def update_slice_x_offset(self, slice_x_offset):
# print(f"Updating slice_x_offset: {slice_x_offset}")
# self.visualizer.parameters.slice_x_pos = slice_x_offset
# self.visualizer.update_generated()
# def update_slice_y_offset(self, slice_y_offset):
# print(f"Updating slice_y_offset: {slice_y_offset}")
# self.visualizer.parameters.slice_y_pos = slice_y_offset
# self.visualizer.update_generated()
# def update_slice_z_offset(self, slice_z_offset):
# print(f"Updating slice_z_offset: {slice_z_offset}")
# self.visualizer.parameters.slice_z_pos = slice_z_offset
# self.visualizer.update_generated()
# def generate_isosurface(self):
# if not self._eval_complete:
# print("Need to run inferencer first!")
# return
# self.update_vis_data()
# self.visualizer.generate_isosurface()
# def generate_streamlines(self):
# if not self._eval_complete:
# print("Need to run inferencer first!")
# return
# self.update_vis_data()
# self.visualizer.generate_streamlines()
# def generate_slices(self):
# if not self._eval_complete:
# print("Need to run inferencer first!")
# return
# self.update_vis_data()
# self.visualizer.generate_slices()
# # Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# # instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# # on_shutdown() is called.
# class WaterTankExt(omni.ext.IExt):
# # ext_id is current extension id. It can be used with extension manager to query additional information, like where
# # this extension is located on filesystem.
# def on_startup(self, ext_id):
# print("[modulus.scenario.WaterTank] Water tank scenario startup")
# self.scenario = WaterTankScenario()
# def on_shutdown(self):
# self.scenario.__del__()
# print("[modulus.scenario.WaterTank] Water tank scenario shutdown")
| 13,691 | Python | 35.512 | 121 | 0.581112 |
DimensionLab/fmmr-water-tank/water_tank/__init__.py | from .extension import * | 24 | Python | 23.999976 | 24 | 0.791667 |
DimensionLab/fmmr-water-tank/water_tank/water_tank_runner.py | import sys, os
import torch
import modulus
from sympy import Symbol, Eq, Abs, tanh
import numpy as np
import logging
from typing import List, Dict, Union
from pathlib import Path
from modulus.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.models.fully_connected import FullyConnectedArch
from modulus.domain.inferencer import (
OVVoxelInferencer,
)
from modulus_ext.ui.scenario import ModulusOVProgressBar
from modulus.key import Key
from modulus.eq.pdes.navier_stokes import NavierStokes
from modulus.eq.pdes.basic import NormalDotVec
from modulus.geometry.tessellation import Tessellation
from .constants import bounds
from .src.geometry import WaterTank
class ModulusWaterTankRunner(object):
"""Water tank Inference runner for OV scenario
Args:
cfg (ModulusConfig): Parsed Modulus config
"""
def __init__(
self,
cfg: ModulusConfig,
progress_bar: ModulusOVProgressBar,
mask_value: float = -100,
):
logging.getLogger().addHandler(logging.StreamHandler())
##############################
# Nondimensionalization Params
##############################
# fluid params
# Water at 20°C (https://wiki.anton-paar.com/en/water/)
# https://en.wikipedia.org/wiki/Viscosity#Kinematic_viscosity
self.nu = 1.787e-06 # m2 * s-1
self.inlet_vel = Symbol("inlet_velocity")
self.rho = 1
self.scale = 1.0
self.cfg = cfg
self.progress_bar = progress_bar
self._eco = False
self._inferencer = None
self.bounds = bounds
self.mask_value = mask_value
@property
def eco(self):
return self._eco
@eco.setter
def eco(self, value: bool):
self._eco = value
if self._inferencer:
self._inferencer.eco = value
def load_inferencer(self, checkpoint_dir: Union[str, None] = None):
"""Create Modulus Water Tank inferencer object. This can take time since
it will initialize the model
Parameters
----------
checkpoint_dir : Union[str, None], optional
Directory to modulus checkpoint
"""
# make list of nodes to unroll graph on
ns = NavierStokes(nu=self.nu * self.scale, rho=self.rho, dim=3, time=False)
normal_dot_vel = NormalDotVec(["u", "v", "w"])
self.progress_bar.value = 0.025
equation_nodes = (
ns.make_nodes()
+ normal_dot_vel.make_nodes()
)
# determine inputs outputs of the network
input_keys = [Key("x"), Key("y"), Key("z")]
input_keys += [Key("inlet_velocity")]
output_keys = [Key("u"), Key("v"), Key("w"), Key("p")]
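        # The network is parameterized: spatial coordinates plus the inlet velocity map to
        # the flow field (u, v, w) and pressure p, so a single trained model can be queried
        # across the whole inlet-velocity range without retraining.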
# select the network and the specific configs
flow_net = FullyConnectedArch(
input_keys=input_keys,
output_keys=output_keys,
)
self.flow_nodes = equation_nodes + [
flow_net.make_node(name="flow_network", jit=self.cfg.jit)
]
invar_keys = [
Key.from_str("x"),
Key.from_str("y"),
Key.from_str("z"),
Key.from_str("inlet_velocity"),
]
outvar_keys = [
Key.from_str("u"),
Key.from_str("v"),
Key.from_str("w"),
Key.from_str("p"),
]
self._inferencer = OVVoxelInferencer(
nodes=self.flow_nodes,
input_keys=invar_keys,
output_keys=outvar_keys,
mask_value=self.mask_value,
requires_grad=False,
eco=False,
progress_bar=self.progress_bar,
)
# Load checkpointed model
if checkpoint_dir is not None:
absolute_checkpoint_dir = Path(__file__).parent / checkpoint_dir
if absolute_checkpoint_dir.resolve().is_dir():
self._inferencer.load_models(absolute_checkpoint_dir.resolve())
else:
print("Could not find checkpointed model")
# Set eco
self._inferencer.eco = self.eco
def load_geometry(self):
# normalize meshes
def normalize_mesh(mesh, center, scale):
mesh = mesh.translate([-c for c in center])
mesh = mesh.scale(scale)
return mesh
stl_path = Path(self.data_path) / Path("stl_files")
self.interior_mesh = Tessellation.from_stl(
Path(stl_path) / Path("water_tank_closed.stl"), airtight=True
)
center = (0, 0, 0)
scale = 1.0
self.interior_mesh = normalize_mesh(self.interior_mesh, center, scale)
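        # With center=(0, 0, 0) and scale=1.0 this normalization is effectively a no-op;
        # it is presumably kept as a hook for recentring/rescaling other STL inputs.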
def run_inference(
self,
inlet_velocity: float,
resolution: List[int] = [256, 256, 256],
) -> Dict[str, np.array]:
"""Runs inference for Water Tank
Args:
resolution (List[int], optional): Voxel resolution. Defaults to [256, 256, 256].
Returns:
Dict[str, np.array]: Predicted output variables
"""
self.progress_bar.value = 0
if self._inferencer is None:
print("Loading Water Tank inferencer")
self.load_inferencer(checkpoint_dir="./checkpoints")
self.progress_bar.value = 0.05
print("Loading Water Tank geometry")
self.load_geometry()
self.progress_bar.value = 0.1
# Eco mode settings
if self._inferencer.eco:
batch_size = 512
memory_fraction = 0.1
else:
vram_gb = torch.cuda.get_device_properties(0).total_memory / 10**9
batch_size = int((vram_gb // 6) * 16 * 1024)
memory_fraction = 1.0
mask_fn = (
lambda x, y, z: self.interior_mesh.sdf({"x": x, "y": y, "z": z}, {})["sdf"]
< 0
)
sp_array = np.ones((np.prod(resolution), 1))
specific_params = {
"inlet_velocity": inlet_velocity * sp_array,
}
# Set up the voxel sample domain
self._inferencer.setup_voxel_domain(
bounds=self.bounds,
npoints=resolution,
invar=specific_params,
batch_size=batch_size,
mask_fn=mask_fn,
)
self.progress_bar.value = 0.2
# Perform inference
invar, predvar = self._inferencer.query(memory_fraction)
        # TODO: remove this; the progress handling below should move inside the inferencer
self.progress_bar._prev_step = 0.0
self.progress_bar.value = 0.9
return predvar
@property
def data_path(self):
data_dir = Path(os.path.dirname(__file__)) / Path("../data")
return str(data_dir)
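# A minimal usage sketch (mirrors how the extension drives this runner; config names are
# assumptions and the snippet is not executed on import):
#
#   from modulus.hydra.utils import compose
#   cfg = compose(config_name="config", config_path="conf", job_name="WaterTank")
#   runner = ModulusWaterTankRunner(cfg, progress_bar=some_progress_bar)  # a ModulusOVProgressBar
#   pred = runner.run_inference(inlet_velocity=1.5, resolution=[64, 32, 64])
#   # pred["u"], pred["v"], pred["w"], pred["p"] are flattened arrays over the voxel grid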
| 6,721 | Python | 30.12037 | 92 | 0.568517 |
DimensionLab/fmmr-water-tank/water_tank/inferencer.py | from modulus.hydra import to_yaml
from modulus.hydra.utils import compose
from modulus.solver import Solver
from modulus.domain import Domain
from modulus.domain.inferencer import PointwiseInferencer, VoxelInferencer
from src.geometry import WaterTank
from src.water_tank import network, constraints, inlet_vel
from src.plotter import generate_velocity_profile_3d, InferencerSlicePlotter2D
cfg = compose(config_path="conf", config_name="config_eval", job_name="water_tank_inference")
print(to_yaml(cfg))
def run():
geo = WaterTank()
domain = Domain()
nodes = network(cfg, scale=geo.scale)
constraints(cfg, geo=geo, nodes=nodes, domain=domain)
inlet_vel_inference = 0.1
inferencer = PointwiseInferencer(
nodes=nodes,
invar=geo.interior_mesh.sample_interior(100000, parameterization={inlet_vel: inlet_vel_inference}),
output_names=["u", "v", "w", "p"],
batch_size=1024,
requires_grad=False,
plotter=InferencerSlicePlotter2D()
)
domain.add_inferencer(inferencer, "simulation")
# add meshgrid inferencer
# mask_fn = lambda x, y, z: geo.interior_mesh.sdf({"x": x, "y": y, "z": z})[0] < 0
# voxel_inference = VoxelInferencer(
# bounds=[[-3, 3], [-3, 3], [-3, 3]],
# npoints=[128, 128, 128],
# nodes=nodes,
# output_names=["u", "v", "w", "p"],
# export_map={"u": ["u", "v", "w"], "p": ["p"]},
# mask_fn=mask_fn,
# batch_size=1024,
# requires_grad=False,
# )
# domain.add_inferencer(voxel_inference, "simulation_voxel")
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
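    # With an eval-style config, solve() typically restores the trained checkpoint and runs
    # the registered inferencers; outputs are usually written under the run's output
    # directory as VTK/npz files (exact behavior depends on the Modulus version in use).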
# generate velocity profile with magnitude (it has V = [u, v, w] in one array)
generate_velocity_profile_3d()
if __name__ == "__main__":
run()
| 1,831 | Python | 29.533333 | 107 | 0.633534 |
DimensionLab/fmmr-water-tank/water_tank/visualizer.py | import omni.usd
import omni.timeline
# Import the HPC visualization pipeline
from hpcvis.vtkm_bridge.core import get_bridge_interface
import numpy as np
from pxr import Sdf, Usd, UsdGeom, UsdUtils
import types
from dataclasses import dataclass
from typing import List
from .constants import bounds
# Module-level handle to the vtkm bridge interface so other parts of the API can reuse it
_vtkm_bridge = None
class VisParameters:
def __init__(self):
self.bounds = np.array(bounds).flatten()
self.isovalue = 0.001
self.streamline_count = 200
self.streamline_step_size = 0.01
self.streamline_step_count = 750
self.streamline_radius = 0.02
self.streamline_height = 0.0
self._slice_x_pos = 0.5
self._slice_y_pos = 0.5
self._slice_z_pos = 0.5
@property
def slice_x_pos(self):
return self._slice_x_pos
@slice_x_pos.setter
def slice_x_pos(self, offset):
self._slice_x_pos = max(
min((offset - self.bounds[0]) / (self.bounds[1] - self.bounds[0]), 1), 0
)
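    # Worked example: with x bounds of (-2, 2), an offset of 0.0 maps to (0 - (-2)) / 4 = 0.5
    # (the mid-plane), and offsets outside the bounds clamp to 0 or 1. The y and z setters
    # below follow the same normalization.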
@property
def slice_y_pos(self):
return self._slice_y_pos
@slice_y_pos.setter
def slice_y_pos(self, offset):
self._slice_y_pos = max(
min((offset - self.bounds[2]) / (self.bounds[3] - self.bounds[2]), 1), 0
)
@property
def slice_z_pos(self):
return self._slice_z_pos
@slice_z_pos.setter
def slice_z_pos(self, offset):
self._slice_z_pos = max(
min((offset - self.bounds[4]) / (self.bounds[5] - self.bounds[4]), 1), 0
)
class Visualizer:
def __init__(self):
# Get the vtkm bridge context
self._vtkm_bridge = None
print(
f"[modulus_ext.scenario.water_tank.visualizer]_vtkm_bridge interface: {self._vtkm_bridge}"
)
self.parameters = VisParameters()
self.velocity = None
self.all_points = None
self.bounds = None
self._stage_id = None
self._isosurface_primname = None
self._streamlines_primname = None
self._slice_primname_x = None
self._slice_primname_y = None
self._slice_primname_z = None
self._seedpoints = None
self._usd_context = None
self.timeline = omni.timeline.acquire_timeline_interface()
def get_geometry_prim(self, bridge_prim_name: str):
stage = self._usd_context.get_stage()
new_suffix = "_geometry"
prim_name = bridge_prim_name.rsplit("_", maxsplit=1)[0] + new_suffix
return stage.GetPrimAtPath(f"/RootClass/geometries/{prim_name}")
def focus_prim(self, prim: Usd.Prim):
if not prim.IsValid():
return
self._usd_context.get_selection().set_selected_prim_paths(
[str(prim.GetPath())], True
)
        try:
            import omni.kit.viewport_legacy

            viewport = omni.kit.viewport_legacy.get_viewport_interface()
            if viewport:
                viewport.get_viewport_window().focus_on_selected()
        except ImportError:
            # Focusing the selection is a convenience only; skip it when the legacy
            # viewport API is not available in this Kit build.
            pass
def update_data(
self,
velocity: np.ndarray,
velmag: np.ndarray,
bounds: List[int],
mask: np.ndarray = None,
resolution: List[int] = [190, 190, 190],
):
self.velocity = velocity
self.bounds = bounds
self.velmag = velmag
def nan_ptp(a):
return np.ptp(a[np.isfinite(a)])
self.velmag = (self.velmag - np.nanmin(self.velmag))/nan_ptp(self.velmag)
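        # Rescale the magnitude field to [0, 1] (ignoring NaN/inf cells) so the isovalue
        # slider, which ranges over (0.001, 1.0), operates on a normalized quantity.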
coords_x = np.linspace(self.bounds[0], self.bounds[1], resolution[0])
coords_y = np.linspace(self.bounds[2], self.bounds[3], resolution[1])
coords_z = np.linspace(self.bounds[4], self.bounds[5], resolution[2])
Z, Y, X = np.meshgrid(coords_z, coords_y, coords_x, indexing="ij")
self.all_points = np.array(
np.transpose([C.flatten() for C in [X, Y, Z]]),
copy=True,
order="C",
dtype=np.float32,
)
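        # Sample points are laid out z-major (note the z, y, x meshgrid order with "ij"
        # indexing) to match the transposed velocity tensor handed over for VTK-style input.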
duplicated_velmag = np.expand_dims(self.velmag, axis=-1)
np.seterr(invalid="ignore")
self.normalized_velocity = self.velocity / duplicated_velmag
#self.normalized_velocity = self.velocity / np.amax(self.velocity)
self.normalized_velocity[mask] = 0
self.update_stage()
self._vtkm_bridge.set_field_data("water_tank_velocity", velocity, n_components=3)
self._vtkm_bridge.set_field_data(
"water_tank_normalized_velocity", self.normalized_velocity, n_components=3
)
self._vtkm_bridge.set_field_data("water_tank_velmag", velmag, n_components=1)
self._vtkm_bridge.set_regular_grid_bounds("water_tank", *bounds)
self._vtkm_bridge.set_regular_grid_extent(
"water_tank", *tuple(reversed(velmag.shape[:3]))
)
if self._seedpoints is not None:
self._vtkm_bridge.set_points("water_tank_points", self._seedpoints)
self.update_generated()
def update_generated(self):
if self._isosurface_primname:
self.generate_isosurface()
if self._streamlines_primname:
self.generate_streamlines()
if self._slice_primname_x or self._slice_primname_y or self._slice_primname_z:
self.generate_slices()
def update_stage(self):
if self._vtkm_bridge is None:
self._vtkm_bridge = get_bridge_interface()
# Use the bridge to generate an isosurface on the data
if self._usd_context is None:
self._usd_context = omni.usd.get_context()
stage = self._usd_context.get_stage()
stage_cache = UsdUtils.StageCache.Get()
stage_id = stage_cache.GetId(stage).ToLongInt()
if stage_id == self._stage_id:
return
self._stage_id = stage_id
self._vtkm_bridge.set_stage(stage_id)
    def random_subset(self, points, values, npoints=25):
        # Seed streamlines only where the fluid is actually moving
        nonzero_selection = self.velmag.ravel() > 0.001
        points_nonzero = points[nonzero_selection]
        velmag_nonzero = self.velmag.ravel()[nonzero_selection]
        print(f"points_nonzero: {points_nonzero[:10]}")
        print(f"velmag_nonzero: {velmag_nonzero[:10]}")
        # np.random.shuffle works in place; take the first npoints as a random sample
        np.random.shuffle(points_nonzero)
        points_subset = points_nonzero[:npoints]
        return points_subset
def generate_streamlines(self):
self.update_stage()
# Use the bridge to generate streamlines on the data
np.random.seed(42)
self._seedpoints = self.random_subset(
self.all_points, self.velocity, npoints=self.parameters.streamline_count
)
self._vtkm_bridge.set_points("water_tank_points", self._seedpoints)
temp = self._streamlines_primname
self._streamlines_primname = self._vtkm_bridge.visualize_streamlines(
enabled=True,
streamline_name="water_tank_streamlines",
velocity_grid_name="water_tank",
velocity_data_array_name="water_tank_normalized_velocity",
sample_quantity_name="water_tank_velmag",
seed_points_name="water_tank_points",
step_size=self.parameters.streamline_step_size,
n_steps=int(self.parameters.streamline_step_count),
enable_tube_filter=True,
tube_radius=self.parameters.streamline_radius,
)
if not self._streamlines_primname:
print("Problem with streamline generation. Keeping old primname.")
self._streamlines_primname = temp
print(f"visualized streamlines: {self._streamlines_primname}")
if not temp and self._streamlines_primname:
prim = self.get_geometry_prim(self._streamlines_primname)
self.focus_prim(prim)
self.timeline.set_end_time(10)
    def generate_isosurface(self):
        self.update_stage()
        # velocity magnitude isosurface
        temp = self._isosurface_primname
        isosurface_prim = self._vtkm_bridge.visualize_isosurface(
            enabled=True,
            isosurface_name="water_tank_isosurface",
            regular_grid_name="water_tank",
            field_data_name="water_tank_velmag",
            sample_quantity_name="water_tank_velmag",
            isovalue=self.parameters.isovalue,
        )
        if not isosurface_prim:
            print("Problem with isosurface generation. Keeping old primname.")
        else:
            self._isosurface_primname = isosurface_prim
        print(f"visualized isosurface: {self._isosurface_primname}")
        # Focus the camera the first time an isosurface is successfully generated
        if not temp and isosurface_prim:
            prim = self.get_geometry_prim(self._isosurface_primname)
            self.focus_prim(prim)
def generate_slices(self):
self.update_stage()
temp_x = self._slice_primname_x
temp_y = self._slice_primname_y
temp_z = self._slice_primname_z
# Use the bridge to generate slices for the data
self._slice_primname_x = self._vtkm_bridge.visualize_slice(
enabled=True,
slice_name="water_tank_slice_x",
regular_grid_name="water_tank",
field_data_name="water_tank_velmag",
az=90.,
el=0.0,
pos=self.parameters.slice_x_pos,
)
print(f"visualized slice: {self._slice_primname_x}")
self._slice_primname_y = self._vtkm_bridge.visualize_slice(
enabled=True,
slice_name="water_tank_slice_y",
regular_grid_name="water_tank",
field_data_name="water_tank_velmag",
az=0.0,
el=0.0,
pos=self.parameters.slice_y_pos,
)
print(f"visualized slice: {self._slice_primname_y}")
self._slice_primname_z = self._vtkm_bridge.visualize_slice(
enabled=True,
slice_name="water_tank_slice_z",
regular_grid_name="water_tank",
field_data_name="water_tank_velmag",
az=0.0,
el=90,
pos=self.parameters.slice_z_pos,
)
print(f"visualized slice: {self._slice_primname_z}")
if not self._slice_primname_x:
print("Problem with slice generation. Keeping old primname.")
self._slice_primname_x = temp_x
if not self._slice_primname_y:
print("Problem with slice generation. Keeping old primname.")
self._slice_primname_y = temp_y
if not self._slice_primname_z:
print("Problem with slice generation. Keeping old primname.")
self._slice_primname_z = temp_z
if not temp_z and self._slice_primname_z:
prim = self.get_geometry_prim(self._slice_primname_z)
self.focus_prim(prim)
| 10,829 | Python | 34.276873 | 102 | 0.599224 |