repo (stringlengths 2-99) | file (stringlengths 13-225) | code (stringlengths 0-18.3M) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (stringclasses 1) |
---|---|---|---|---|---|---
DelayResolvedRL | DelayResolvedRL-main/W-Maze/Tabular-Q/dr_train.py | import numpy as np
import matplotlib.pyplot as plt
import datetime
# from tqdm import tqdm
from pathlib import Path
from env import Environment
from dr_agent import Agent
'''Augmented State Implementation of Tabular-Q'''
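# Illustrative sketch (not used by the script): the augmented, "delay-resolved" state is
# the grid position together with the buffer of pending actions, which is why the updates
# below index the table as Q[(x, y) + action_history + (a,)]. The hypothetical helper
# shows the table shape this indexing implies; the actual layout lives in dr_agent.Agent.
def _augmented_q_table_shape(breadth=7, length=11, num_actions=4, delay=2):
    # one axis per buffered action plus one axis for the action being evaluated
    return (breadth, length) + (num_actions,) * delay + (num_actions,)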
algorithms = ['SARSA', 'Q']
for algorithm in algorithms:
delays = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
runs = 10
for delay in delays:
for run in range(runs):
            seed = run
            np.random.seed(seed)  # np.random.seed() returns None, so don't assign its result
            env = Environment(seed, delay)  # Initialize Environment
agent = Agent(env.state_space, env.num_actions, delay) # Initialize Q-learning agent
episodes = int(1e5)
alpha = 0.1
gamma = 0.995
lambda_trace = 0.0 # lambda value for eligibility traces
all_rewards = []
file_dir = 'Results-v1.0-cumulative/' + 'maze_' + algorithm + '_lambda_' + str(lambda_trace) + '_' + str(delay)
Path(file_dir).mkdir(parents=True, exist_ok=True)
filename = file_dir + '/' + str(run)
for episode in range(episodes):
rewards = 0
state = env.state
agent.fill_up_buffer(state)
action, agent_action = agent.choose_action(state)
if episode % 1000 == 0:
agent.update_epsilon(0.995*agent.epsilon)
for ep_step in range(env.turn_limit):
next_action_history = tuple(agent.actions_in_buffer)
action_history = tuple(agent.actions_in_buffer_prev)
next_state, reward, done = env.step(state, action)
next_action, agent_next_action = agent.choose_action(next_state)
if algorithm == 'Q':
# agent.E[state[0], state[1], agent_action] += 1 # accumulating traces
"""Q-Learning"""
td_error = reward + gamma*np.max(agent.Q_values[(next_state[0], next_state[1]) + next_action_history]) \
- agent.Q_values[(state[0], state[1]) + action_history + (agent_action,)]
elif algorithm == 'SARSA':
# agent.E[(state[0], state[1]) + agent_action] += 1 # accumulating traces
"""SARSA"""
td_error = reward + gamma*agent.Q_values[(next_state[0], next_state[1]) + next_action_history +
(agent_next_action,)] - agent.Q_values[(state[0], state[1]) +
action_history + (
agent_action,)]
else:
raise Exception('Algorithm Undefined')
agent.Q_values[(state[0], state[1]) + action_history + (agent_action,)] += alpha * td_error
'''Trace Calculation'''
# for s_x in range(env.breadth):
# for s_y in range(env.length):
# for a in range(env.num_actions):
# agent.Q_values[s_x, s_y, a] += \
# alpha * td_error * agent.E[s_x, s_y, a]
# agent.E[s_x, s_y, a] = \
# gamma * lambda_trace * agent.E[s_x, s_y, a]
state = next_state
action = next_action
agent_action = agent_next_action
rewards += reward
if done:
'''Verbose'''
# print('\nEpisode: {}, Rewards: {} \r'.format(episode, rewards))
break
all_rewards.append(rewards)
np.save(filename, all_rewards)
| 3,912 | 53.347222 | 128 | py |
DelayResolvedRL | DelayResolvedRL-main/W-Maze/Tabular-Q/agent.py | import numpy as np
from collections import deque
'''Q-learning agent for the baselines'''
class Agent:
def __init__(self, state_space, num_actions, delay):
self.epsilon = 1.0
self.num_actions = num_actions
self.delay = delay
self.actions_in_buffer = deque(maxlen=self.delay)
self.Q_values = np.zeros([state_space.shape[0], state_space.shape[1], num_actions])
# self.E = np.zeros([state_space.shape[0], state_space.shape[1], num_actions]) # eligibily trace
@staticmethod
def randargmax(b, **kw):
""" a random tie-breaking argmax"""
return np.argmax(np.random.random(b.shape) * (b == b.max()), **kw)
def update_epsilon(self, epsilon):
self.epsilon = epsilon
"""fill up action buffer with the action from the current state"""
def fill_up_buffer(self, state):
for _ in range(self.delay):
action = self.act(state)
self.actions_in_buffer.append(action)
def choose_action(self, state):
        if self.delay == 0:
            action = self.act(state)
            return action, action  # no delay: the executed and the newly chosen action coincide
action = self.actions_in_buffer.popleft() # get delayed action
next_action = self.act(state)
self.actions_in_buffer.append(next_action) # put undelayed action into the buffer
return action, next_action
def act(self, state):
        if self.epsilon < np.random.random():  # exploit: act greedily w.r.t. Q-values
            action = self.randargmax(self.Q_values[state[0], state[1]])
        else:
            action = np.random.randint(self.num_actions)  # explore: random action
return action
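# Minimal usage sketch (assumes the W-Maze shapes used by the training scripts; the 7x11
# zero array only gives Q_values its shape). It shows how the buffer delays actions:
# the returned `executed` action was queued `delay` calls earlier, `chosen` is fresh.
if __name__ == '__main__':
    demo_agent = Agent(state_space=np.zeros((7, 11)), num_actions=4, delay=3)
    demo_state = [6, 0]
    demo_agent.fill_up_buffer(demo_state)
    executed, chosen = demo_agent.choose_action(demo_state)
    print(executed, chosen)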
| 1,625 | 35.954545 | 105 | py |
DelayResolvedRL | DelayResolvedRL-main/W-Maze/Tabular-Q/plot.py | import matplotlib.pyplot as plt
import numpy as np
import os
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
matplotlib.rcParams.update({'font.size': 13})
"""Plotting"""
algorithms = ['Q']
# delays = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
delays = [2, 4, 6, 8, 10]
runs = 10
lambda_trace = 0.0
for algorithm in algorithms:
rewards = {}
rewards_err = {}
episodes = 10000
for delay in delays:
reward_runs = np.zeros(runs)
for run in range(runs):
filename = 'Maze-Tabular\\Results-v3-cumulative\\maze_' + algorithm + '_lambda_' + str(lambda_trace) + '_' + str(delay) \
+ '\\' + str(run) + '.npy'
reward_current = np.load(filename)[-episodes-1:-1]
reward_runs[run] = np.mean(reward_current)
rewards[delay] = np.mean(reward_runs)
rewards_err[delay] = np.std(reward_runs, axis=0)
alg = 'DRQ'
color = u'#1f77b4'
# plt.plot(list(rewards.keys()), list(rewards.values()), marker='o', label=alg, color=color)
plt.errorbar(list(rewards.keys()), list(rewards.values()), yerr=list(rewards_err.values()),
uplims=True, lolims=True, label=alg, color=color)
plt.title('W-Maze', fontsize=20)
plt.xticks(list(rewards.keys()))
algorithms = ['Q', 'dQ']
for algorithm in algorithms:
rewards = {}
rewards_err = {}
for delay in delays:
reward_runs = np.zeros(runs)
for run in range(runs):
filename = 'Maze-Tabular\\Results-v3\\maze_' + algorithm + '_lambda_' + str(0.0) + '_' + str(delay) \
+ '\\' + str(run) + '.npy'
reward_current = np.load(filename)[-episodes-1:-1]
reward_runs[run] = np.mean(reward_current)
rewards[delay] = np.mean(reward_runs)
rewards_err[delay] = np.std(reward_runs, axis=0)
if algorithm == 'dQ':
alg = 'delay-Q'
color = 'red'
else:
alg = 'Q'
color = u'#2ca02c'
# plt.plot(list(rewards.keys()), list(rewards.values()), marker='o', label=alg, color=color)
plt.errorbar(list(rewards.keys()), list(rewards.values()), yerr=list(rewards_err.values()),
uplims=True, lolims=True, label=alg, color=color)
plt.legend()
plt.xlabel('Delays', fontsize=16)
plt.xticks(fontsize=16)
plt.ylabel('Rewards', fontsize=16)
plt.yticks(fontsize=16)
save_dir = os.getcwd() + '/Maze-Tabular/Plots/'
try:
plt.savefig(save_dir + '/rewards_comparison.pdf', bbox_inches="tight")
except FileNotFoundError:
os.makedirs(os.getcwd() + '/Maze-Tabular/Plots/')
plt.savefig(save_dir + '/rewards_comparison.pdf', bbox_inches="tight")
plt.tight_layout()
plt.show()
| 2,721 | 34.815789 | 133 | py |
DelayResolvedRL | DelayResolvedRL-main/W-Maze/Tabular-Q/train.py | import numpy as np
import matplotlib.pyplot as plt
import datetime
# from tqdm import tqdm
from pathlib import Path
from env import Environment
from agent import Agent
'''Training baseline algorithms'''
algorithms = ['SARSA', 'Q', 'dSARSA', 'dQ'] # dSARSA and dQ are from https://ieeexplore.ieee.org/document/5650345
for algorithm in algorithms:
delays = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
runs = 10
for delay in delays:
for run in range(runs):
            seed = run
            np.random.seed(seed)  # np.random.seed() returns None, so don't assign its result
            env = Environment(seed, delay)  # Initialize Environment
agent = Agent(env.state_space, env.num_actions, delay) # Initialize Q-learning agent
episodes = int(1e5)
alpha = 0.1
gamma = 0.995
lambda_trace = 0.0 # lambda value for eligibility traces
all_rewards = []
file_dir = 'Results-v1.0/' + 'maze_' + algorithm + '_lambda_' + str(lambda_trace) + '_' + str(delay) # 1.0 is version number
Path(file_dir).mkdir(parents=True, exist_ok=True)
filename = file_dir + '/' + str(run)
for episode in range(episodes):
rewards = 0
state = env.state
agent.fill_up_buffer(state)
action, agent_action = agent.choose_action(state)
if episode % 1000 == 0:
agent.update_epsilon(0.95*agent.epsilon)
for ep_step in range(env.turn_limit):
next_state, reward, done = env.step(state, action)
next_action, agent_next_action = agent.choose_action(next_state)
if algorithm == 'dQ':
# agent.E[state[0], state[1], action] += 1 # accumulating traces
"""Q-Learning"""
td_error = reward + gamma*np.max(agent.Q_values[next_state[0], next_state[1]]) - \
agent.Q_values[state[0], state[1], action]
elif algorithm == 'dSARSA':
# agent.E[state[0], state[1], action] += 1 # accumulating traces
"""SARSA"""
td_error = reward + gamma*agent.Q_values[next_state[0], next_state[1], next_action] - \
agent.Q_values[state[0], state[1], action]
elif algorithm == 'Q':
# agent.E[state[0], state[1], agent_action] += 1 # accumulating traces
"""Q-Learning"""
td_error = reward + gamma*np.max(agent.Q_values[next_state[0], next_state[1]]) - \
agent.Q_values[state[0], state[1], agent_action]
elif algorithm == 'SARSA':
# agent.E[state[0], state[1], agent_action] += 1 # accumulating traces
"""SARSA"""
td_error = reward + gamma*agent.Q_values[next_state[0], next_state[1], agent_next_action] - \
agent.Q_values[state[0], state[1], agent_action]
else:
raise Exception('Algorithm Undefined')
if algorithm.startswith('d'):
agent.Q_values[state[0], state[1], action] += alpha * td_error # take effective action into update equation
else:
agent.Q_values[state[0], state[1], agent_action] += alpha * td_error
'''Trace Calculation'''
# for s_x in range(env.breadth):
# for s_y in range(env.length):
# for a in range(env.num_actions):
# agent.Q_values[s_x, s_y, a] += \
# alpha * td_error * agent.E[s_x, s_y, a]
# agent.E[s_x, s_y, a] = \
# gamma * lambda_trace * agent.E[s_x, s_y, a]
state = next_state
action = next_action
agent_action = agent_next_action
rewards += reward
if done:
'''Verbose'''
# print('\nEpisode: {}, Rewards: {} \r'.format(episode, rewards))
break
all_rewards.append(rewards)
np.save(filename, all_rewards)
| 4,409 | 52.13253 | 137 | py |
DelayResolvedRL | DelayResolvedRL-main/W-Maze/Tabular-Q/env.py | import numpy as np
from collections import deque
class Environment:
"""Initialize Environment"""
def __init__(self, seed, delay):
np.random.seed(seed)
self.breadth = 7
self.length = 11
self.state_space = np.empty([self.breadth, self.length], dtype='<U1')
'''Environment Configuration'''
self.state_space[:] = 'E'
self.state_space[0] = 'X'
self.state_space[1:4, self.length // 2 - 2] = 'X'
self.state_space[1:4, self.length // 2 + 2] = 'X'
self.state_space[0, self.length // 2 - 1:self.length // 2 + 2] = 'G'
self.state_space[self.breadth - 1, 0] = 'P'
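        # Layout implied by the assignments above ('X' wall, 'G' goal, 'E' empty, 'P' player;
        # reset() later re-places 'P' in a random row of column 0):
        #   row 0: X X X X G G G X X X X
        #   row 1: E E E X E E E X E E E
        #   row 2: E E E X E E E X E E E
        #   row 3: E E E X E E E X E E E
        #   row 4: E E E E E E E E E E E
        #   row 5: E E E E E E E E E E E
        #   row 6: P E E E E E E E E E E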
'''Actions'''
self.actions = [0, 1, 2, 3] # UP, DOWN, LEFT, RIGHT
self.num_actions = len(self.actions)
self.turn_limit = 300
self.delay = delay
self.actions_in_buffer = deque(maxlen=self.delay)
self.fill_up_buffer()
self.delayed_action = 0
self.state = self.reset()
def reset(self):
x = np.random.randint(self.breadth)
y = 0
starting_state = [x, y]
self.state_space[x, y] = 'P'
self.fill_up_buffer()
return starting_state
def fill_up_buffer(self):
for _ in range(self.delay):
action = np.random.choice(self.num_actions)
self.actions_in_buffer.append(action)
def step(self, state, action):
done = False
player_position = state
reward = -1
"""UP"""
if action == 0:
if player_position[0] - 1 >= 0 and self.state_space[player_position[0] - 1, player_position[1]] != 'X':
self.state_space[player_position[0], player_position[1]] = 'E'
if self.state_space[player_position[0] - 1, player_position[1]] == 'G':
done = True
self.state = self.reset()
reward = 11
else:
self.state = [player_position[0] - 1, player_position[1]]
self.state_space[player_position[0] - 1, player_position[1]] = 'P'
"""DOWN"""
if action == 1:
if player_position[0] + 1 < self.breadth \
and self.state_space[player_position[0] + 1, player_position[1]] != 'X':
self.state_space[player_position[0], player_position[1]] = 'E'
if self.state_space[player_position[0] + 1, player_position[1]] == 'G':
done = True
self.state = self.reset()
reward = 11
else:
self.state = player_position[0] + 1, player_position[1]
self.state_space[player_position[0] + 1, player_position[1]] = 'P'
"""LEFT"""
if action == 2:
if player_position[1] - 1 >= 0 and self.state_space[player_position[0], player_position[1] - 1] != 'X':
self.state_space[player_position[0], player_position[1]] = 'E'
if self.state_space[player_position[0], player_position[1] - 1] == 'G':
done = True
self.state = self.reset()
reward = 11
else:
self.state = player_position[0], player_position[1] - 1
self.state_space[player_position[0], player_position[1] - 1] = 'P'
"""RIGHT"""
if action == 3:
if player_position[1] + 1 < self.length \
and self.state_space[player_position[0], player_position[1] + 1] != 'X':
self.state_space[player_position[0], player_position[1]] = 'E'
if self.state_space[player_position[0], player_position[1] + 1] == 'G':
done = True
self.state = self.reset()
reward = 11
else:
self.state = [player_position[0], player_position[1] + 1]
self.state_space[player_position[0], player_position[1] + 1] = 'P'
return self.state, reward, done
| 4,070 | 42.774194 | 115 | py |
DelayResolvedRL | DelayResolvedRL-main/Gym(Constant)/init_main.py | import gym
from delayed_env import DelayedEnv
import wandb
"""Code adapted from https://openreview.net/forum?id=j1RMMKeP2gR"""
'''HyperParameters'''
# MountainCar-v0
# Number of Runs:10 \\
# Number of Frames: 1 Million \\
# Batch Size: 32 \\
# $\gamma$: 0.99 \\
# Learning Rate: 1e-3 \\
# $\epsilon$-Start: 1.0 \\
# $\epsilon$-Stop: 1e-4 \\
# $\epsilon$-Decay: 1e-4 \\
# Hidden Units: [200, 200] (Action delays)\\
# Hidden Units: [200] (Observation delays)\\
# Forward Model: [200, 200] \\
# Replay Buffer Size: 1000 \\
# Target Network Frequency Update: 300 \\
# Acrobot-v1
# Number of Runs:10 \\
# Number of Frames: 1 Million \\
# Batch Size: 32 \\
# $\gamma$: 0.99 \\
# Learning Rate: 1e-3 \\
# $\epsilon$-Start: 1.0 \\
# $\epsilon$-Stop: 1e-4 \\
# $\epsilon$-Decay: 1e-4 \\
# Hidden Units: [200] \\
# Forward Model: [200] \\
# Replay Buffer Size: 1000 \\
# Target Network Frequency Update: 25 \\
# Reshaped CartPole-v0
# Number of Runs:10 \\
# Number of Frames: 1 Million \\
# Batch Size: 32 \\
# $\gamma$: 0.99 \\
# Learning Rate: 1e-3 \\
# $\epsilon$-Start: 1.0 \\
# $\epsilon$-Stop: 1e-4 \\
# $\epsilon$-Decay: 1e-4 \\
# Hidden Units: [200] \\
# Forward Model: [24, 24] \\
# Replay Buffer Size: 1000 \\
# Target Network Frequency Update: 300 \\
# CartPole-v0
# Number of Runs:10 \\
# Number of Frames: 1 Million \\
# Batch Size: 32 \\
# $\gamma$: 0.99 \\
# Learning Rate: 1e-3 \\
# $\epsilon$-Start: 1.0 \\
# $\epsilon$-Stop: 1e-4 \\
# $\epsilon$-Decay: 1e-4 \\
# Hidden Units: [200] \\
# Forward Model: [200] \\
# Replay Buffer Size: 1000 \\
# Target Network Frequency Update: 25 \\
def init_main(algorithm, delay, seed):
hyperparameter_defaults = dict(
is_delayed_agent=False,
double_q=True,
delay_value=delay,
epsilon_decay=1e-4,
epsilon_min=0.001, #0.001
learning_rate=0.001, #0.005, #mountainCar: 0.0001
seed=seed,
epsilon=1.0,
use_m_step_reward=False,
use_latest_reward=False,
use_reward_shaping=False,
physical_noise_std_ratio=0.0, #0.1
env_name='CartPole-v0', #'CartPole-v1', 'Acrobot-v1', 'MountainCar-v0'
train_freq=1,
target_network_update_freq=300,
use_learned_forward_model=True,
agent_type=algorithm, # 'augmented' (DRDQN), 'delayed' (https://openreview.net/forum?id=j1RMMKeP2gR), 'oblivious' (DQN), 'delay' (https://ieeexplore.ieee.org/document/5650345)
total_steps=3000,
)
# Pass your defaults to wandb.init
wandb.init(project='delay-rl', name='DQN_{}_delay_{}_seed_{}'.format(algorithm, delay, seed),
config=hyperparameter_defaults)
config = wandb.config
if 'CartPole' in config.env_name or 'Acrobot' in config.env_name:
orig_env = gym.make(config.env_name)
orig_env.seed(seed)
else:
orig_env = gym.make(config.env_name)
orig_env.seed(seed)
# orig_env = DiscretizeActions(orig_env) # for mujoco envs
delayed_env = DelayedEnv(orig_env, config.delay_value)
state_size = orig_env.observation_space.shape
if not delayed_env.is_atari_env:
state_size = state_size[0]
action_size = orig_env.action_space.n
done = False
batch_size = 32
return config, delayed_env, state_size, action_size, done, batch_size
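# Minimal smoke-test sketch (assumes gym and a configured wandb account are available;
# the arguments are placeholders, not the settings used for the reported experiments):
if __name__ == '__main__':
    cfg, env, state_dim, n_actions, _, batch = init_main(algorithm='augmented', delay=3, seed=0)
    print(cfg.env_name, state_dim, n_actions, batch)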
| 3,328 | 28.990991 | 183 | py |
DelayResolvedRL | DelayResolvedRL-main/Gym(Constant)/dqn_agents.py | from collections import deque
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPool2D, Flatten
from copy import deepcopy
import random
from keras.optimizers import Adam
from keras import backend as K
import tensorflow as tf
import numpy as np
def reshape_state(state, is_atari_env, state_size):
reshaped = state
if not is_atari_env:
reshaped = np.reshape(state, [1, state_size])
else:
if len(state.shape) < 4:
reshaped = np.expand_dims(state, axis=0)
return reshaped
def update_loss(loss, sample_loss):
if loss is not None and sample_loss is not None:
for key, val in sample_loss.items():
if key in loss:
loss[key] += val
else:
loss[key] = val
def concatenate_state_action(state, action):
out = np.concatenate((state[0], [action]))
out = np.reshape(out, [1, len(out)])
return out
class DQNAgent:
def __init__(self, seed, state_size, action_size, is_atari_env, is_delayed_agent=False, delay_value=0, epsilon_min=0.001,
epsilon_decay=0.999, learning_rate=0.001, epsilon=1.0, use_m_step_reward=False, use_latest_reward=True,
loss='mse', **kwargs):
np.random.seed(seed)
tf.random.set_seed(seed)
random.seed(seed)
self.state_size = state_size
self.action_size = action_size
self.is_atari_env = is_atari_env
mem_len = 50000 if self.is_atari_env else 1000
self.memory = deque(maxlen=mem_len)
self.gamma = 0.99 # discount rate
self.epsilon = epsilon # exploration rate
self.epsilon_min = epsilon_min
self.epsilon_decay = epsilon_decay
self.learning_rate = learning_rate
self.sample_buffer = deque()
self.is_delayed_agent = is_delayed_agent
self.delay_value = delay_value
self.model = self._build_model(loss=loss)
self.use_m_step_reward = use_m_step_reward
self.use_latest_reward = use_latest_reward
def _huber_loss(self, y_true, y_pred, clip_delta=1.0):
"""Huber loss for Q Learning
References: https://en.wikipedia.org/wiki/Huber_loss
https://www.tensorflow.org/api_docs/python/tf/losses/huber_loss
"""
error = y_true - y_pred
cond = K.abs(error) <= clip_delta
squared_loss = 0.5 * K.square(error)
quadratic_loss = 0.5 * K.square(clip_delta) + clip_delta * (K.abs(error) - clip_delta)
return K.mean(tf.where(cond, squared_loss, quadratic_loss))
def _build_forward_model(self, loss='mse', input_size=None, output_size=None):
input_size = self.state_size if input_size is None else input_size
output_size = self.action_size if output_size is None else output_size
model = Sequential()
model.add(Dense(200, input_dim=input_size, activation='relu'))
model.add(Dense(200, activation='relu'))
model.add(Dense(output_size, activation='linear'))
model.compile(loss=loss,
optimizer=Adam(lr=self.learning_rate))
return model
def _build_model(self, loss=None, input_size=None, output_size=None):
        loss = self._huber_loss if loss == 'huber' else loss
input_size = self.state_size if input_size is None else input_size
output_size = self.action_size if output_size is None else output_size
# Neural Net for Deep-Q learning Model
model = Sequential()
if self.is_atari_env:
model.add(Conv2D(32, 8, strides=(4,4), input_shape=input_size, activation='relu'))
model.add(MaxPool2D())
model.add(Conv2D(64, 4, strides=(2,2), activation='relu'))
model.add(MaxPool2D())
model.add(Conv2D(64, 3, strides=(1,1), activation='relu'))
model.add(MaxPool2D())
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(output_size, activation='linear'))
else:
# model.add(Dense(24, input_dim=input_size, activation='relu'))
# model.add(Dense(24, activation='relu'))
# model.add(Dense(output_size, activation='linear'))
model.add(Dense(200, input_dim=input_size, activation='tanh', kernel_initializer='RandomNormal'))
# model.add(Dense(200, activation='tanh'))
model.add(Dense(output_size, activation='linear', kernel_initializer='RandomNormal'))
model.compile(loss=loss,
optimizer=Adam(lr=self.learning_rate))
return model
def memorize(self, state, action, reward, next_state, done):
if self.is_delayed_agent:
# for earlier time than delay_value, the data is problematic (non-delayed response)
# Construct modified tuple by keeping old s_t with new a_{t+m}, r_{t+m} s_{t+m+1}
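            # Net effect of the buffering below (with delay_value = m): the tuple stored in
            # memory is (s_t, a_{t+m}, r', s_{t+1}, done_t), i.e. the tuple from m steps ago
            # with its action replaced by the current one and its reward by m_step_reward(.).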
new_tuple = (state, action, reward, next_state, done)
self.sample_buffer.append(new_tuple)
if len(self.sample_buffer) - 1 >= self.delay_value:
old_tuple = self.sample_buffer.popleft()
modified_tuple = list(deepcopy(old_tuple))
modified_tuple[1] = action
modified_tuple[2] = self.m_step_reward(first_reward=old_tuple[2])
# trying to use s_{t+1} instead of s_{t+m} as in the original ICML2020 submission
# modified_tuple[3] = next_state
modified_tuple = tuple(modified_tuple)
self.memory.append(modified_tuple)
else:
self.memory.append((state, action, reward, next_state, done))
def act(self, state, eval=False):
if not eval and np.random.rand() <= self.epsilon:
return random.randrange(self.action_size)
act_values = self.model.predict(state)
return np.argmax(act_values[0]) # returns action
def m_step_reward(self, first_reward):
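        """Reward attached to the stored (delayed) transition: the newest buffered reward,
        the original ``first_reward``, or -- when ``use_m_step_reward`` is set -- the
        discounted sum r_t + gamma * r_{t+1} + ... + gamma^m * r_{t+m} over the sample buffer."""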
if not self.use_m_step_reward:
if self.use_latest_reward:
return self.sample_buffer[-1][2]
else:
return first_reward
else:
discounted_rew = first_reward
for i in range(self.delay_value):
discounted_rew += self.gamma ** (i + 1) * self.sample_buffer[i][2]
return discounted_rew
def effective_gamma(self):
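        # gamma ** (delay_value + 1) pairs with the m-step reward above: when the stored
        # reward already sums delay_value + 1 discounted rewards, the bootstrap term of the
        # TD target has to be discounted by that many additional steps.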
return self.gamma if not self.use_m_step_reward else (self.gamma ** (self.delay_value + 1))
def replay(self, batch_size, global_step):
minibatch = random.sample(self.memory, batch_size)
for state, action, reward, next_state, done in minibatch:
target = reward
if not done:
target = (reward + self.effective_gamma() *
np.amax(self.model.predict(next_state)[0]))
target_f = self.model.predict(state)
target_f[0][action] = target
# self.model.fit(state, target_f, epochs=1, verbose=0,
# callbacks=[WandbCallback()])
self.model.fit(state, target_f, epochs=1, verbose=0)
self.epsilon = self.epsilon_min + (1.0 - self.epsilon_min) * np.exp(-self.epsilon_decay * global_step)
def load(self, name):
self.model.load_weights(name)
def save(self, name):
self.model.save_weights(name)
def clear_action_buffer(self):
self.sample_buffer.clear()
class DDQNAgent(DQNAgent):
def __init__(self, seed, state_size, action_size, is_atari_env, is_delayed_agent=False, delay_value=0, epsilon_min=0.001,
epsilon_decay=0.999, learning_rate=0.001, epsilon=1.0, use_m_step_reward=False, use_latest_reward=True):
super().__init__(seed, state_size, action_size, is_atari_env=is_atari_env, is_delayed_agent=is_delayed_agent, delay_value=delay_value,
epsilon_min=epsilon_min, epsilon_decay=epsilon_decay, learning_rate=learning_rate,
epsilon=epsilon, use_m_step_reward=use_m_step_reward, use_latest_reward=use_latest_reward,
loss='huber')
# self.model = self._build_model()
self.target_model = self._build_model(loss='mse')
self.update_target_model()
def update_target_model(self):
# copy weights from model to target_model
self.target_model.set_weights(self.model.get_weights())
def train_model(self, batch):
state_vec, action_vec, reward_vec, next_state_vec, done_vec = batch
target = self.model.predict(state_vec)
t = self.target_model.predict(next_state_vec)
not_done_arr = np.invert(np.asarray(done_vec))
new_targets = reward_vec + not_done_arr * self.effective_gamma() * np.amax(t, axis=1)
for i in range(len(batch[0])):
target[i][action_vec[i]] = new_targets[i]
train_history = self.model.fit(state_vec, target, epochs=1, verbose=0)
q_loss = train_history.history['loss'][0]
loss_dict = {'q_loss': q_loss}
return loss_dict
def _create_batch(self, indices):
state_vec, action_vec, reward_vec, next_state_vec, done_vec = [], [], [], [], []
for i in indices:
data = self.memory[i]
state, action, reward, next_state, done = data
state_vec.append(np.array(state, copy=False))
action_vec.append(action)
reward_vec.append(reward)
next_state_vec.append(np.array(next_state, copy=False))
done_vec.append(done)
return np.concatenate(state_vec, axis=0), action_vec, reward_vec, np.concatenate(next_state_vec, axis=0), done_vec
def replay(self, batch_size, global_step):
loss = {}
indices = np.random.choice(len(self.memory), batch_size)
batch = self._create_batch(indices)
sample_loss = self.train_model(batch)
update_loss(loss, sample_loss)
self.epsilon = self.epsilon_min + (1.0 - self.epsilon_min) * np.exp(-self.epsilon_decay * global_step)
return loss
class DDQNPlanningAgent(DDQNAgent):
def __init__(self, seed, state_size, action_size, is_atari_env, is_delayed_agent=False, delay_value=0, epsilon_min=0.001,
epsilon_decay=0.999, learning_rate=0.001, epsilon=1.0, use_m_step_reward=False,
use_latest_reward=True, env=None, use_learned_forward_model=True):
super().__init__(seed, state_size, action_size, is_atari_env=is_atari_env, is_delayed_agent=is_delayed_agent, delay_value=delay_value,
epsilon_min=epsilon_min, epsilon_decay=epsilon_decay, learning_rate=learning_rate,
epsilon=epsilon, use_m_step_reward=use_m_step_reward, use_latest_reward=use_latest_reward)
self.use_learned_forward_model = use_learned_forward_model
if self.use_learned_forward_model:
keras_forward_model = self._build_forward_model(loss='mse', input_size=self.state_size + 1, output_size=self.state_size)
self.forward_model = ForwardModel(keras_forward_model)
else:
self.forward_model = env
def train_model(self, batch):
loss_dict = super().train_model(batch)
if self.use_learned_forward_model and self.delay_value > 0:
state_vec, action_vec, _, next_state_vec, _ = batch
act_t = np.asarray([action_vec]).transpose()
concat_vec = np.concatenate((state_vec, act_t), axis=1)
train_history = self.forward_model.keras_model.fit(concat_vec, next_state_vec, epochs=1, verbose=0)
f_model_loss = train_history.history['loss'][0]
loss_dict['f_model_loss'] = f_model_loss
return loss_dict
def act(self, state, pending_actions, eval):
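        # The pending (queued but not yet executed) actions are rolled forward through the
        # forward model (the learned one, or the env itself) to estimate the state at which
        # the action chosen now will actually be applied; Q is then maximised at that state.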
if not eval and np.random.rand() <= self.epsilon:
return random.randrange(self.action_size)
last_state = state
if self.delay_value > 0:
if not self.use_learned_forward_model:
self.forward_model.store_initial_state()
# initial_state = deepcopy(state)
for curr_action in pending_actions:
last_state = self.forward_model.get_next_state(state=last_state, action=curr_action)
if not self.use_learned_forward_model:
self.forward_model.restore_initial_state()
last_state_r = reshape_state(last_state, self.is_atari_env, self.state_size)
act_values = self.model.predict(last_state_r)
return np.argmax(act_values[0]) # returns best action for last state
def memorize(self, state, action, reward, next_state, done):
# for earlier time than delay_value, the data is problematic (non-delayed response)
# Construct modified tuple by keeping old s_t with new a_{t+m}, r_{t+m} s_{t+m+1}
new_tuple = (state, action, reward, next_state, done)
self.sample_buffer.append(new_tuple)
if len(self.sample_buffer) - 1 >= self.delay_value:
old_tuple = self.sample_buffer.popleft()
modified_tuple = list(deepcopy(old_tuple))
# build time-coherent tuple from new tuple and old action
modified_tuple[0] = state
# modified_tuple[1] = action
modified_tuple[2] = reward # self.m_step_reward(first_reward=old_tuple[2])
modified_tuple[3] = next_state
modified_tuple = tuple(modified_tuple)
self.memory.append(modified_tuple)
class ForwardModel:
def __init__(self, keras_model):
self.keras_model = keras_model
def get_next_state(self, state, action):
input = concatenate_state_action(state, action)
return self.keras_model.predict(input)
def reset_to_state(self, state):
# not necessary here. Only used if the forward_model is the actual env instance
pass | 13,916 | 46.498294 | 142 | py |
DelayResolvedRL | DelayResolvedRL-main/Gym(Constant)/plot_time.py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import math
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
matplotlib.rcParams.update({'font.size': 13})
# results = pd.read_csv('results_cartpole.csv')
results = pd.read_csv('results_acrobot.csv')
# results = pd.read_csv('results_mcar.csv')
# results = pd.read_csv('results_cartpole_reshaped.csv')
runs = 5
delays = np.unique(results['delay_value'])
# delays_str = ['2','4','6','8','10','15','20']
algorithms = ['DRDQN', 'Delayed DQN', 'DQN']
colors = {'DRDQN':u'#1f77b4', 'Delayed DQN':u'#ff7f0e', 'DQN':u'#2ca02c'}
count = 0
for algorithm in algorithms:
x = []
for delay in delays:
avg = np.zeros(runs)
for run in range(runs):
avg[run] = results['Runtime'][count]
count += 1
x.append(np.mean(avg))
plt.plot(delays, x, label=algorithm, marker='o', color=colors[algorithm])
plt.xlabel('Delays', fontsize=16)
plt.xticks([2,4,6,8,10], fontsize=16)
plt.ylabel('Runtime (Minutes)', fontsize=16)
plt.yticks(fontsize=16)
# plt.title('CartPole-v0', fontsize=20)
plt.title('Acrobot-v1', fontsize=20)
# plt.title('MountainCar-v0', fontsize=20)
# plt.title('CartPole-v0 (Reshaped Rewards)', fontsize=20)
plt.legend()
plt.tight_layout()
plt.show() | 1,323 | 31.292683 | 77 | py |
DelayResolvedRL | DelayResolvedRL-main/Gym(Constant)/ddqn_main.py | import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = "3" # Suppress Tensorflow Messages
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import numpy as np
from dqn_agents import DDQNAgent, DDQNPlanningAgent, update_loss, reshape_state
from init_main import init_main
import wandb
from tqdm import tqdm
import argparse
import socket
os.environ["WANDB_SILENT"] = "true"
# os.environ["WANDB_MODE"] = "dryrun"
parser = argparse.ArgumentParser()
# parser.add_argument("--algorithm", default='oblivious', help="algorithm")
# parser.add_argument("--algorithm", default='delayed', help="algorithm")
# parser.add_argument("--algorithm", default='delay', help="algorithm")
parser.add_argument("--algorithm", default='augmented', help="algorithm")
parser.add_argument("--delay", default=0, help="environment delay")
parser.add_argument("--seed", default=0, help="seed")
args = parser.parse_args()
algorithm = args.algorithm
delay = int(args.delay)
seed = int(args.seed)
AVERAGE_OVER_LAST_EP = 1
# EPISODES = 3500
SAVE_PATH = 'saved_agents'
EP_LEN_LIMIT = int(1e4)
EVAL_FREQ = 5
def init_episode(delayed_env, agent, augment_state, state_size):
ep_reward = 0
ep_reshaped_reward = 0
state = delayed_env.reset()
state = massage_state(state, augment_state, delayed_env, state_size)
agent.clear_action_buffer()
loss_dict = {}
loss_count = 0
ep_step = 0
return ep_reward, ep_reshaped_reward, state, loss_dict, loss_count, ep_step
def routinely_save_agent(e, env_name):
pass
# agent_name = env_name + '_ddqn_delay.h5'
# if e % 349 == 0:
# if not os.path.isdir(SAVE_PATH):
# os.makedirs(SAVE_PATH)
# agent_full_name = wandb.run.id + '_' + agent_name
# agent_path = os.path.join(SAVE_PATH, agent_full_name)
# agent.save(agent_path)
# print('saved agent to {}'.format(agent_path))
def agent_act(config, agent, state, delayed_env, eval=False):
if config.agent_type == 'delayed':
action = agent.act(state, pending_actions=delayed_env.get_pending_actions(), eval=eval)
else:
action = agent.act(state, eval)
return action
def massage_state(state, augment_state, delayed_env, state_size):
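    # For the 'augmented' (DRDQN) agent the observation is concatenated with the
    # delay_value pending actions before reshaping, so the network sees the information
    # state (observation + queued actions); the other agent types get the raw state.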
if augment_state:
state = np.concatenate((state, delayed_env.get_pending_actions()))
state = reshape_state(state, delayed_env.is_atari_env, state_size)
return state
if __name__ == "__main__":
config, delayed_env, state_size, action_size, done, batch_size = init_main(algorithm, delay, seed)
score_vec = []
# for non-atari (i.e. cartpole) env, run on CPU
# if not delayed_env.is_atari_env:
kwargs = {
'action_size': action_size,
'is_atari_env': delayed_env.is_atari_env,
'delay_value': config.delay_value,
'epsilon_min': config.epsilon_min,
'epsilon_decay': config.epsilon_decay,
'learning_rate': config.learning_rate,
'epsilon': config.epsilon,
'use_m_step_reward': config.use_m_step_reward,
'use_latest_reward': config.use_latest_reward
}
# if not config.double_q:
# agent = DQNAgent(state_size=state_size, **kwargs)
# else:
augment_state = False
# wandb.config.update({'augment_state': False}, allow_val_change=True)
if config.agent_type == 'delayed':
agent = DDQNPlanningAgent(seed=seed, state_size=state_size, env=delayed_env,
use_learned_forward_model=config.use_learned_forward_model, **kwargs)
else:
if config.agent_type == 'augmented':
# wandb.config.update({'augment_state': True}, allow_val_change=True)
augment_state = True
state_size += config.delay_value
agent = DDQNAgent(seed=seed, state_size=state_size, **kwargs)
elif config.agent_type == 'delay':
agent = DDQNAgent(seed=seed, state_size=state_size, is_delayed_agent=True, **kwargs)
# third option is 'oblivious'
else:
agent = DDQNAgent(seed=seed, state_size=state_size, **kwargs)
episode = 0
ep_reward, ep_reshaped_reward, state, loss_dict, loss_count, ep_step = init_episode(delayed_env, agent,
augment_state, state_size)
total_steps_delay_dependent = int(1000000) # + config.delay_value * 10000)
# eval_done = False
for step_num in tqdm(range(total_steps_delay_dependent)):
action = agent_act(config, agent, state, delayed_env, eval=False)
next_state, reward, done, _ = delayed_env.step(action)
ep_reward += reward
if config.use_reward_shaping and not delayed_env.is_atari_env:
reward = delayed_env.get_shaped_reward(next_state, reward)
ep_reshaped_reward += reward
next_state = massage_state(next_state, augment_state, delayed_env, state_size)
can_memorize = ep_step > config.delay_value or not delayed_env.pretrained_agent_loaded
if can_memorize: # otherwise, we're using expert samples initially which is unfair
agent.memorize(state, action, reward, next_state, done)
state = next_state
if config.double_q and step_num % config.target_network_update_freq == 0:
agent.update_target_model()
if len(agent.memory) > batch_size and step_num % config.train_freq == 0:
batch_loss_dict = agent.replay(batch_size, step_num)
update_loss(loss_dict, batch_loss_dict)
loss_count += 1
ep_step += 1
if done:
routinely_save_agent(episode, config.env_name)
wandb_dict = {'reward': ep_reward, 'ep_reshaped_reward': ep_reshaped_reward}
if 'f_model_loss' in loss_dict:
f_model_loss = loss_dict['f_model_loss'] / loss_count
wandb_dict['f_model_loss'] = f_model_loss
wandb.log(wandb_dict, step=step_num)
score_vec.append(ep_reward)
episode += 1
ep_reward, ep_reshaped_reward, state, loss_dict, loss_count, ep_step = init_episode(delayed_env, agent, augment_state,
state_size)
tot_ep_num = len(score_vec)
avg_over = round(tot_ep_num * AVERAGE_OVER_LAST_EP)
final_avg_score = np.mean(score_vec[-avg_over:])
wandb.log({'final_score': final_avg_score})
| 6,390 | 42.182432 | 130 | py |
DelayResolvedRL | DelayResolvedRL-main/Gym(Constant)/plot.py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import math
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
matplotlib.rcParams.update({'font.size': 13})
# results = pd.read_csv('results_cartpole.csv')
# results = pd.read_csv('results_acrobot.csv')
results = pd.read_csv('results_mcar.csv')
# results = pd.read_csv('results_cartpole_reshaped.csv')
runs = 10
delays = np.unique(results['delay_value'])
delays_str = ['2','4','6','8','10','15','20']
algorithms = ['DRDQN', 'Delayed DQN', 'DQN']
colors = {'DRDQN':u'#1f77b4', 'Delayed DQN':u'#ff7f0e', 'DQN':u'#2ca02c'}
count = 0
for algorithm in algorithms:
x = []
x_std = []
for delay in delays:
avg = np.zeros(runs)
for run in range(runs):
avg[run] = results['final_score'][count]
print(algorithm, delay, run, avg[run])
if math.isnan(avg[run]):
avg[run] = -200
count += 1
x.append(np.mean(avg))
x_std.append(np.std(avg)/np.sqrt(runs))
# plt.plot(delays_str, x, label=algorithm, marker='o', color=colors[algorithm])
plt.errorbar(delays, x, yerr=x_std, label=algorithm, marker='o', color=colors[algorithm], uplims=True, lolims=True)
plt.xticks(fontsize=16) #[2,4,6,8,10,15,20]
plt.ylabel('Rewards', fontsize=16)
plt.xlabel('Delays', fontsize=16)
plt.yticks(fontsize=16)
# plt.ylim(0,220)
# plt.title('CartPole-v0', fontsize=20)
# plt.title('Acrobot-v1', fontsize=20)
plt.title('MountainCar-v0', fontsize=20)
# plt.title('CartPole-v0 (Reshaped Rewards)', fontsize=20)
plt.legend()
plt.tight_layout()
plt.show()
| 1,652 | 31.411765 | 119 | py |
DelayResolvedRL | DelayResolvedRL-main/Gym(Constant)/delayed_env.py | from collections import deque
from dqn_agents import DQNAgent
import numpy as np
from dqn_agents import reshape_state
from numpy import sin, cos, pi
class DelayedEnv:
def __init__(self, orig_env, delay_value):
self.orig_env = orig_env
self.env_name = str(self.orig_env)
self.is_atari_env = 'AtariEnv' in self.env_name
self.pending_actions = deque()
self.delay_value = delay_value
self.state_size = orig_env.observation_space.shape
if not self.is_atari_env:
self.state_size = self.state_size[0]
self.action_size = orig_env.action_space.n
self.stored_init_state = None
self.trained_non_delayed_agent = DQNAgent(seed=0, state_size=self.state_size,
action_size=self.action_size, is_delayed_agent=False,
delay_value=0, epsilon=0, is_atari_env=self.is_atari_env)
self.pretrained_agent_loaded = False
def step(self, action):
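        # Queueing sketch: the chosen action is pushed onto `pending_actions`; once the queue
        # holds more than `delay_value` entries the oldest one is popped and executed, so an
        # action takes effect `delay_value` steps after it was chosen. While the queue is
        # still filling, a pre-trained non-delayed DQN picks the action actually executed.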
if self.delay_value > 0:
self.pending_actions.append(action)
if len(self.pending_actions) - 1 >= self.delay_value:
executed_action = self.pending_actions.popleft()
else:
curr_state = reshape_state(self.get_curr_state(), self.is_atari_env, self.state_size)
executed_action = self.trained_non_delayed_agent.act(curr_state)
else:
executed_action = action
return self.orig_env.step(executed_action)
def reset(self):
self.pending_actions.clear()
return self.orig_env.reset()
def get_shaped_reward(self, state, orig_reward):
reward = orig_reward
if 'CartPole' in self.env_name:
x, x_dot, theta, theta_dot = state
r1 = (self.orig_env.x_threshold - abs(x)) / self.orig_env.x_threshold - 0.8
r2 = (self.orig_env.theta_threshold_radians - abs(
theta)) / self.orig_env.theta_threshold_radians - 0.5
reward = r1 + r2
        if 'MountainCar' in self.env_name:
            position = state[0]
            reward = (position - self.orig_env.goal_position) / ((self.orig_env.max_position - self.orig_env.min_position) * 10)
            # check the largest threshold first; otherwise positions >= 0.25 or >= 0.5
            # would fall into the `position >= 0.1` branch and only receive +10
            if position >= 0.5:
                reward += 100
            elif position >= 0.25:
                reward += 50
            elif position >= 0.1:
                reward += 10
return reward
def get_pending_actions(self):
if len(self.pending_actions) == 0 and self.delay_value > 0:
# reconstruct anticipated trajectory using the oracle
self.store_initial_state()
curr_state = self.get_curr_state()
for i in range(self.delay_value):
curr_state = reshape_state(curr_state, self.is_atari_env, self.state_size)
estimated_action = self.trained_non_delayed_agent.act(curr_state)
self.pending_actions.append(estimated_action)
curr_state = self.get_next_state(state=None, action=estimated_action)
self.restore_initial_state()
return self.pending_actions
def store_initial_state(self):
if self.is_atari_env:
self.stored_init_state = self.orig_env.clone_state()
else:
self.stored_init_state = self.orig_env.unwrapped.state
def restore_initial_state(self):
if self.is_atari_env:
self.orig_env.restore_state(self.stored_init_state)
else:
self.orig_env.unwrapped.state = self.stored_init_state
def get_curr_state(self):
if self.is_atari_env:
curr_state = self.orig_env.ale.getScreenRGB2()
else:
curr_state = self.orig_env.unwrapped.state
if 'Acrobot' in self.env_name:
curr_state = np.array([cos(curr_state[0]), sin(curr_state[0]), cos(curr_state[1]), sin(curr_state[1]),
curr_state[2], curr_state[3]])
return curr_state
def get_next_state(self, state, action):
next_state, _, _, _ = self.orig_env.step(action)
self.orig_env._elapsed_steps -= 1
return next_state
def reset_to_state(self, state):
self.orig_env.unwrapped.state = state
# | 4,293 | 40.288462 | 128 | py |
baconian-project | baconian-project-master/setup.py | from setuptools import setup, find_packages
import os
def parse_requirements(filename):
""" load requirements from a pip requirements file """
lineiter = (line.strip() for line in open(filename))
return [line for line in lineiter if line and not line.startswith("#")]
CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
req = parse_requirements(filename=os.path.join(CURRENT_PATH, 'requirements.txt'))
# req = [str(ir.req) for ir in req]
# print(req)
with open(os.path.join(CURRENT_PATH, 'README.md'), 'r', encoding='utf-8') as f:
long_description = f.read()
exec(open('baconian/version.py').read())
ver = __version__
setup(
name='baconian',
version=ver,
url='https://github.com/cap-ntu/baconian-project',
license='MIT License',
author='Linsen Dong',
author_email='[email protected]',
description='model-based reinforcement learning toolbox',
install_requires=req,
long_description=long_description,
long_description_content_type='text/markdown',
packages=find_packages(include=['baconian', 'baconian.*'], exclude=[]),
python_requires='>=3.5',
include_package_data=True,
package_data={'baconian': ['config/required_keys/*', 'benchmark/**/*', './*']}
)
| 1,245 | 30.15 | 82 | py |
baconian-project | baconian-project-master/__init__.py | 0 | 0 | 0 | py |
|
baconian-project | baconian-project-master/baconian/version.py | __version__ = '0.2.6'
| 22 | 10.5 | 21 | py |
baconian-project | baconian-project-master/baconian/__init__.py | import os
from baconian.version import __version__
ROOT_PATH = os.path.dirname(os.path.realpath(__file__))
| 107 | 26 | 55 | py |
baconian-project | baconian-project-master/baconian/core/status.py | import abc
from baconian.common.logging import ConsoleLogger
from baconian.common.error import *
from functools import wraps
import numpy as np
__all__ = ['Status', 'StatusWithSubInfo', 'StatusWithSingleInfo', 'StatusWithInfo', 'StatusCollector',
'reset_global_status_collect', 'register_counter_info_to_status_decorator', 'get_global_status_collect']
class Status(object):
"""
    One of the core modules: a class indicating the current status of an object in Baconian.
"""
def __init__(self, obj):
"""
        The object that this status will be attached to.
        :param obj: An object of Baconian, e.g., Environment, Agent, Algo.
"""
self.obj = obj
self._status_val = None
if hasattr(obj, 'STATUS_LIST'):
self._status_list = obj.STATUS_LIST
else:
self._status_list = None
if hasattr(obj, 'INIT_STATUS') and obj.INIT_STATUS is not None:
self.set_status(new_status=obj.INIT_STATUS)
else:
self._status_val = None
def __call__(self, *args, **kwargs):
return dict(status=self._status_val)
def set_status(self, new_status: str):
if not isinstance(new_status, str):
raise TypeError("{} is not string".format(new_status))
if self._status_list:
try:
assert new_status in self._status_list
except AssertionError as e:
ConsoleLogger().print('info', "{} New status :{} not in the status list: {} ".format(e, new_status,
self._status_list))
self._status_val = new_status
else:
self._status_val = new_status
def get_status(self) -> dict:
return self()
class StatusWithInfo(Status):
@abc.abstractmethod
def append_new_info(self, *args, **kwargs):
raise NotImplementedError
@abc.abstractmethod
def has_info(self, *args, **kwargs):
raise NotImplementedError
@abc.abstractmethod
def update_info(self, *args, **kwargs):
raise NotImplementedError
@abc.abstractmethod
def reset(self):
raise NotImplementedError
@abc.abstractmethod
def get_specific_info_key_status(self, info_key, *args, **kwargs):
raise NotImplementedError
class StatusWithSingleInfo(StatusWithInfo):
def __init__(self, obj):
super().__init__(obj)
self._info_dict = {}
def __call__(self, *args, **kwargs):
res = super().__call__(*args, **kwargs)
return {**res, **self._info_dict}
def set_status(self, new_status: str):
return super().set_status(new_status)
def get_status(self) -> dict:
return self()
def append_new_info(self, info_key: str, init_value, under_status=None):
if info_key == 'status':
raise ValueError("can use key: status which is a system default key")
if info_key in self._info_dict:
return
else:
self._info_dict[info_key] = init_value
def get_specific_info_key_status(self, info_key, *args, **kwargs):
try:
return self._info_dict[info_key]
except KeyError:
ConsoleLogger().print('ERROR',
'try to access info key status: {} of obj: {}'.format(info_key, self.obj.name))
return None
def has_info(self, info_key, **kwargs):
return info_key in self._info_dict
def update_info(self, info_key, increment, under_status=None):
if not self.has_info(info_key=info_key):
self.append_new_info(info_key=info_key, init_value=0)
self._info_dict[info_key] += increment
def reset(self):
self._info_dict = {}
class StatusWithSubInfo(StatusWithInfo):
def __init__(self, obj):
super().__init__(obj)
if not hasattr(obj, 'STATUS_LIST') or not hasattr(obj, 'INIT_STATUS'):
raise ValueError(
"StatusWithSubInfo require the source object to have class attr: STATUS_LIST and INIT_STATUS")
self._info_dict_with_sub_info = {}
for key in self._status_list:
self._info_dict_with_sub_info[key] = {}
def __call__(self, *args, **kwargs):
res = super().__call__(*args, **kwargs)
return {**res, **self._info_dict_with_sub_info[self._status_val]}
def set_status(self, new_status: str):
return super().set_status(new_status)
def get_status(self) -> dict:
return self()
def get_specific_info_key_status(self, info_key, under_status, *args, **kwargs):
res = self._get_specific_info_key_status(info_key=info_key, under_status=under_status, *args, **kwargs)
if res is None:
ConsoleLogger().print('error', 'try to access info key status: {} under status {} of obj: {}'.
format(info_key, under_status, self.obj.name))
else:
return res
def _get_specific_info_key_status(self, info_key, under_status, *args, **kwargs):
try:
return self._info_dict_with_sub_info[under_status][info_key]
except KeyError:
return None
def group_specific_info_key(self, info_key, group_way):
assert group_way in ('sum', 'max', 'min', 'mean')
res = []
for st in self._status_list:
re = self._get_specific_info_key_status(info_key=info_key, under_status=st)
if re:
res.append(re)
if group_way == 'sum':
return sum(res)
if group_way == 'max':
return max(res)
if group_way == 'min':
return min(res)
if group_way == 'mean':
return np.mean(np.array(res)).item()
def append_new_info(self, info_key: str, init_value, under_status=None):
if not under_status:
under_status = self._status_val
if info_key == 'status':
raise ValueError("can use key: status which is a system default key")
if info_key in self._info_dict_with_sub_info[under_status]:
return
else:
self._info_dict_with_sub_info[under_status][info_key] = init_value
def has_info(self, info_key, under_status=None):
if not under_status:
under_status = self._status_val
return info_key in self._info_dict_with_sub_info[under_status]
def update_info(self, info_key, increment, under_status=None):
if not under_status:
under_status = self._status_val
if not self.has_info(info_key=info_key, under_status=under_status):
self.append_new_info(info_key=info_key, init_value=0, under_status=under_status)
self._info_dict_with_sub_info[under_status][info_key] += increment
def reset(self):
for key in self._status_list:
self._info_dict_with_sub_info[key] = {}
class StatusCollector(object):
def __init__(self):
self._register_status_dict = []
def __call__(self, key: str = None, *args, **kwargs):
if key:
for val in self._register_status_dict:
if val['return_name'] == key:
obj = val['obj']
assert hasattr(obj, '_status')
assert isinstance(getattr(obj, '_status'), StatusWithInfo)
if getattr(obj, '_status').has_info(info_key=val['info_key'],
under_status=val['under_status']) is False:
raise StatusInfoNotRegisteredError(
'{} do not have {} under {}'.format(obj, val['info_key'], val['under_status']))
res = obj._status.get_specific_info_key_status(under_status=val['under_status'],
info_key=val['info_key'])
return res
else:
stat_dict = dict()
for val in self._register_status_dict:
obj = val['obj']
assert hasattr(obj, '_status')
assert isinstance(getattr(obj, '_status'), StatusWithInfo)
if getattr(obj, '_status').has_info(info_key=val['info_key'],
under_status=val['under_status']) is False:
raise StatusInfoNotRegisteredError(
'{} do not have {} under {}'.format(obj, val['info_key'], val['under_status']))
res = obj._status.get_specific_info_key_status(under_status=val['under_status'],
info_key=val['info_key'])
stat_dict[val['return_name']] = res
return stat_dict
def get_status(self) -> dict:
return self()
def register_info_key_status(self, obj, info_key: str, return_name: str, under_status=None):
ConsoleLogger().print('info',
'registered obj: {}, key: {}, return name: {}, under status: {}'.format(obj, info_key,
return_name,
under_status))
for val in self._register_status_dict:
assert return_name != val['return_name']
self._register_status_dict.append(
dict(obj=obj, info_key=info_key, under_status=under_status, return_name=return_name))
try:
self(info_key)
except StatusInfoNotRegisteredError as e:
ConsoleLogger().print('warning',
                                  'newly registered info: obj: {}, key: {}, return name: {}, under status: {} cannot be detected now'.format(
obj, info_key,
return_name,
under_status))
def reset(self):
self._register_status_dict = []
def register_counter_info_to_status_decorator(increment, info_key, under_status: (str, tuple) = None,
ignore_wrong_status=False):
def wrap(fn):
if under_status:
assert isinstance(under_status, (str, tuple))
if isinstance(under_status, str):
final_st = tuple([under_status])
else:
final_st = under_status
else:
final_st = (None,)
@wraps(fn)
def wrap_with_self(self, *args, **kwargs):
# todo record() called in fn will lost the just appended info_key at the very first
obj = self
if not hasattr(obj, '_status') or not isinstance(getattr(obj, '_status'), StatusWithInfo):
raise ValueError(
                    'the object {} does not have a StatusWithInfo instance or holds a wrong type of Status'.format(
obj))
assert isinstance(getattr(obj, '_status'), StatusWithInfo)
obj_status = getattr(obj, '_status')
for st in final_st:
obj_status.append_new_info(info_key=info_key, init_value=0, under_status=st)
res = fn(self, *args, **kwargs)
for st in final_st:
if st and st != obj.get_status()['status'] and not ignore_wrong_status:
raise ValueError('register counter info under status: {} but got status {}'.format(st,
obj.get_status()[
'status']))
obj_status.update_info(info_key=info_key, increment=increment,
under_status=obj.get_status()['status'])
return res
return wrap_with_self
return wrap
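# Illustrative usage of the decorator above (a hypothetical class, not part of Baconian):
#
#   class Worker(Basic):
#       STATUS_LIST = ('TRAIN', 'TEST')
#       INIT_STATUS = 'TRAIN'
#
#       def __init__(self, name):
#           super().__init__(name=name, status=StatusWithSubInfo(obj=self))
#
#       @register_counter_info_to_status_decorator(increment=1, info_key='step', under_status='TRAIN')
#       def step(self):
#           ...
#
# Each call to Worker().step() then increments the 'step' counter kept under the object's
# current status ('TRAIN').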
# _global_experiment_status = StatusWithSingleInfo(obj=None)
#
# from baconian.config.global_config import GlobalConfig
#
# for key in GlobalConfig().DEFAULT_EXPERIMENT_END_POINT.keys():
# _global_experiment_status.append_new_info(info_key=key, init_value=0)
# def get_global_experiment_status() -> StatusWithSingleInfo:
# return globals()['_global_experiment_status']
#
# def reset_global_experiment_status():
# globals()['_global_experiment_status'].reset()
#
_global_status_collector = StatusCollector()
def get_global_status_collect() -> StatusCollector:
return globals()['_global_status_collector']
def reset_global_status_collect():
globals()['_global_status_collector'].reset()
| 12,755 | 38.49226 | 139 | py |
baconian-project | baconian-project-master/baconian/core/core.py | import gym
import numpy as np
from typeguard import typechecked
from baconian.common.spaces import Space
from baconian.common.special import flat_dim, flatten
from baconian.common.logging import Recorder
from baconian.config.global_config import GlobalConfig
from baconian.core.status import *
from baconian.core.util import register_name_globally, init_func_arg_record_decorator
"""
This module contains some core classes of baconian
"""
class Basic(object):
""" Basic class within the whole framework"""
STATUS_LIST = GlobalConfig().DEFAULT_BASIC_STATUS_LIST
INIT_STATUS = GlobalConfig().DEFAULT_BASIC_INIT_STATUS
required_key_dict = ()
allow_duplicate_name = False
def __init__(self, name: str, status=None):
"""
Init a new Basic instance.
        :param name: name of the object, used to generate the log path, the tensorflow name scope, etc.
:type name: str
:param status: A status instance :py:class:`~baconian.core.status.Status` to indicate the status of the object
:type status: Status
"""
if not status:
self._status = Status(self)
else:
self._status = status
self._name = name
register_name_globally(name=name, obj=self)
def init(self, *args, **kwargs):
"""Initialize the object"""
raise NotImplementedError
def get_status(self) -> dict:
""" Return the object's status, a dictionary."""
return self._status.get_status()
def set_status(self, val):
""" Set the object's status."""
self._status.set_status(val)
@property
def name(self):
""" The name(id) of object, a string."""
return self._name
@property
def status_list(self):
""" Status list of the object, ('TRAIN', 'TEST')."""
return self.STATUS_LIST
def save(self, *args, **kwargs):
""" Save the parameters in training checkpoints."""
raise NotImplementedError
def load(self, *args, **kwargs):
""" Load the parameters from training checkpoints."""
raise NotImplementedError
class Env(gym.Env, Basic):
"""
Abstract class for environment
"""
key_list = ()
STATUS_LIST = ('JUST_RESET', 'INITED', 'TRAIN', 'TEST', 'CREATED')
INIT_STATUS = 'CREATED'
@typechecked
def __init__(self, name: str = 'env', copy_from_env=None):
super(Env, self).__init__(status=StatusWithSubInfo(obj=self), name=name)
self.action_space = None
self.observation_space = None
self.trajectory_level_step_count = 0
self.recorder = Recorder(default_obj=self)
self._last_reset_point = 0
self.total_step_count_fn = lambda: self._status.group_specific_info_key(info_key='step', group_way='sum')
self.env_spec = None
if copy_from_env:
assert isinstance(copy_from_env, Env)
self.action_space = copy_from_env.action_space
self.observation_space = copy_from_env.observation_space
self.trajectory_level_step_count = copy_from_env.trajectory_level_step_count
            self._last_reset_point = copy_from_env._last_reset_point
self.env_spec = copy_from_env.env_spec
@register_counter_info_to_status_decorator(increment=1, info_key='step', under_status=('TRAIN', 'TEST'),
ignore_wrong_status=True)
def step(self, action):
"""
:param action: agent's action, the environment will react responding to action
:type action: method
"""
self.trajectory_level_step_count += 1
pass
@register_counter_info_to_status_decorator(increment=1, info_key='reset', under_status='JUST_RESET')
def reset(self):
""" Set the status to 'JUST_RESET', and update new reset point"""
self._status.set_status('JUST_RESET')
self._last_reset_point = self.total_step_count_fn()
self.trajectory_level_step_count = 0
@register_counter_info_to_status_decorator(increment=1, info_key='init', under_status='INITED')
def init(self):
""" Set the status to 'INITED'."""
self._status.set_status('INITED')
def get_state(self):
""" Get the status of the environment."""
raise NotImplementedError
def seed(self, seed=None):
"""
:param seed: seed to generate random number
:type seed: int
:return: seed of the unwrapped environment
:rtype: int
"""
return self.unwrapped.seed(seed=seed)
class EnvSpec(object):
@init_func_arg_record_decorator()
@typechecked
def __init__(self, obs_space: Space, action_space: Space):
self._obs_space = obs_space
self._action_space = action_space
@property
def obs_shape(self):
obs_shape = tuple(np.array(self.obs_space.sample()).shape)
if len(obs_shape) == 0:
obs_shape = (1,)
return obs_shape
@property
def action_shape(self):
action_shape = tuple(np.array(self.action_space.sample()).shape)
if len(action_shape) == 0:
action_shape = ()
return action_shape
@property
def obs_space(self):
"""
:return: Observation space of environment
:rtype: Space
"""
return self._obs_space
@property
def action_space(self):
"""
:return: Action space of environment
:rtype: Space
"""
return self._action_space
@obs_space.setter
def obs_space(self, s: Space):
self._obs_space = s
@action_space.setter
def action_space(self, s: Space):
self._action_space = s
@property
def flat_obs_dim(self) -> int:
"""
:return: the dimension(length) of flatten observation space
:rtype: int
"""
return int(flat_dim(self.obs_space))
@property
def flat_action_dim(self) -> int:
"""
:return: the dimension(length) of flatten action space
:rtype: int
"""
return int(flat_dim(self.action_space))
@staticmethod
def flat(space: Space, obs_or_action: (np.ndarray, list)):
"""
flat the input obs or action
:param space: space of environment
:type space: Space
:param obs_or_action: action or observation space
:type obs_or_action: (np.ndarray, list)
:return: flatten action or observation space
:rtype: Space
"""
return flatten(space, obs_or_action)
def flat_action(self, action: (np.ndarray, list)):
"""
:param action: action taken by agent
:type action: (np.ndarray, list)
:return: flatten action parameter
:rtype: np.ndarray
"""
return flatten(self.action_space, action)
def flat_obs(self, obs: (np.ndarray, list)):
"""
:param obs: observation of the agent
:type obs: (np.ndarray, list)
:return: flatten observation parameter
:rtype: np.ndarray
"""
return flatten(self.obs_space, obs)
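# A minimal usage sketch for EnvSpec, assuming Box and Discrete are importable from
# baconian.common.spaces (as they are in baconian.envs.gym_env); the exact flattened
# representation of a Discrete action (e.g. one-hot) depends on the space implementation.
#
#   import numpy as np
#   from baconian.common.spaces import Box, Discrete
#
#   spec = EnvSpec(obs_space=Box(low=np.array([-1.0, -1.0]), high=np.array([1.0, 1.0])),
#                  action_space=Discrete(3))
#   print(spec.obs_shape, spec.action_shape)          # shapes inferred from sampled values
#   print(spec.flat_obs_dim, spec.flat_action_dim)    # flattened dimensions of the two spaces
#   flat_obs = spec.flat_obs(np.array([0.5, -0.2]))   # 1-D ndarray of length flat_obs_dim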
| 7,160 | 29.602564 | 118 | py |
baconian-project | baconian-project-master/baconian/core/experiment.py | """
For experiments, its functionality should include:
1. experiment and config set up
2. logging control
3. hyper-param tuning etc.
4. visualization
5. any related experiment utility
...
"""
from baconian.core.core import Basic
from baconian.common.logging import ConsoleLogger
from baconian.config.global_config import GlobalConfig
from baconian.core.tuner import Tuner
from baconian.core.util import init_func_arg_record_decorator
import tensorflow as tf
from typeguard import typechecked
from baconian.tf.util import create_new_tf_session
from baconian.core.core import Env
from baconian.core.agent import Agent
from baconian.common.logging import Recorder
from baconian.core.status import *
from baconian.core.flow.train_test_flow import Flow
from baconian.core.global_var import reset_all as reset_global_var
from baconian.common.logging import reset_logging
class Experiment(Basic):
STATUS_LIST = ('CREATED', 'INITED', 'RUNNING', 'FINISHED', 'CORRUPTED')
INIT_STATUS = 'CREATED'
# required_key_dict = DictConfig.load_json(file_path=GlobalConfig().DEFAULT_EXPERIMENT_REQUIRED_KEY_LIST)
required_key_dict = dict()
@init_func_arg_record_decorator()
@typechecked
def __init__(self,
name: str,
agent: Agent,
env: Env,
flow: Flow,
tuner: Tuner = None,
register_default_global_status=True
):
"""
:param name: name of experiment
:type name: str
:param agent: agent of experiment
:type agent: Agent
:param env: environment of experiment
:type env: Env
:param flow: control flow to experiment
:type flow: Flow
:param tuner: hyper-parameter tuning method, currently in development
:type tuner: Tuner
:param register_default_global_status: register info key and status into global status collection
:type register_default_global_status: bool
"""
super().__init__(status=StatusWithSingleInfo(obj=self), name=name)
self.agent = agent
self.env = env
self.tuner = tuner
self.recorder = Recorder(flush_by_split_status=False, default_obj=self)
# self.status_collector = StatusCollector()
self.flow = flow
if register_default_global_status is True:
get_global_status_collect().register_info_key_status(obj=agent,
info_key='predict_counter',
under_status='TRAIN',
return_name='TOTAL_AGENT_TRAIN_SAMPLE_COUNT')
get_global_status_collect().register_info_key_status(obj=agent,
info_key='sample_counter',
under_status='TRAIN',
return_name='TOTAL_AGENT_TRAIN_SAMPLE_FUNC_COUNT')
get_global_status_collect().register_info_key_status(obj=agent,
info_key='predict_counter',
under_status='TEST',
return_name='TOTAL_AGENT_TEST_SAMPLE_COUNT')
get_global_status_collect().register_info_key_status(obj=agent,
info_key='update_counter',
under_status='TRAIN',
return_name='TOTAL_AGENT_UPDATE_COUNT')
get_global_status_collect().register_info_key_status(obj=env,
info_key='step',
under_status='TEST',
return_name='TOTAL_ENV_STEP_TEST_SAMPLE_COUNT')
get_global_status_collect().register_info_key_status(obj=env,
info_key='step',
under_status='TRAIN',
return_name='TOTAL_ENV_STEP_TRAIN_SAMPLE_COUNT')
def init(self):
""" Create a new TensorFlow session, and set status to 'INITED'."""
create_new_tf_session()
self.agent.init()
self.env.init()
self.set_status('INITED')
def run(self):
""" Run the experiment, and set status to 'RUNNING'."""
GlobalConfig().freeze()
self.init()
self.set_status('RUNNING')
res = self.flow.launch()
if res is False:
self.set_status('CORRUPTED')
else:
self.set_status('FINISHED')
self._exit()
def _exit(self):
""" Exit the experiment, reset global configurations and logging module."""
sess = tf.get_default_session()
if sess:
sess.__exit__(None, None, None)
tf.reset_default_graph()
reset_global_status_collect()
reset_logging()
reset_global_var()
GlobalConfig().unfreeze()
def TOTAL_AGENT_UPDATE_COUNT(self):
return get_global_status_collect()('TOTAL_AGENT_UPDATE_COUNT')
def TOTAL_AGENT_TRAIN_SAMPLE_COUNT(self):
return get_global_status_collect()('TOTAL_AGENT_TRAIN_SAMPLE_COUNT')
def TOTAL_AGENT_TEST_SAMPLE_COUNT(self):
return get_global_status_collect()('TOTAL_AGENT_TEST_SAMPLE_COUNT')
def TOTAL_ENV_STEP_TRAIN_SAMPLE_COUNT(self):
return get_global_status_collect()('TOTAL_ENV_STEP_TRAIN_SAMPLE_COUNT')
def TOTAL_ENV_STEP_TEST_SAMPLE_COUNT(self):
return get_global_status_collect()('TOTAL_ENV_STEP_TEST_SAMPLE_COUNT')
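# A schematic sketch of assembling and running an Experiment, assuming an env, an agent
# and a flow (e.g. built with create_train_test_flow) have already been created elsewhere;
# it only mirrors the constructor signature above and is not a complete runnable script.
#
#   exp = Experiment(name='demo_exp', agent=agent, env=env, flow=flow)
#   exp.run()   # init() -> flow.launch() -> status set to FINISHED or CORRUPTED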
| 6,078 | 43.372263 | 115 | py |
baconian-project | baconian-project-master/baconian/core/experiment_runner.py | import os
import random
import time
import numpy as np
import tensorflow as tf
from GPUtil import GPUtil as Gpu
from typeguard import typechecked
from baconian.common import files as file
from baconian.common.logging import Logger, ConsoleLogger
from baconian.config.global_config import GlobalConfig
from copy import deepcopy
import tracemalloc
def _reset_global_seed(seed):
"""
:param seed: seed to reset global variables
:type seed: int
"""
sess = tf.get_default_session()
if sess:
sess.__exit__(None, None, None)
tf.reset_default_graph()
tf.set_random_seed(seed)
np.random.seed(seed)
random.seed(seed)
@typechecked
def single_exp_runner(task_fn, auto_choose_gpu_flag=False, gpu_id: int = 0, seed=None, del_if_log_path_existed=False,
**task_fn_kwargs):
"""
    :param task_fn: task function defined by users
:type task_fn: method
:param auto_choose_gpu_flag: auto choose gpu, default False
:type auto_choose_gpu_flag: bool
:param gpu_id: gpu id, default 0
:type gpu_id: int
:param seed: seed generated by system time
:type seed: int
:param del_if_log_path_existed:delete obsolete log file path if existed, by default False
:type del_if_log_path_existed: bool
:param task_fn_kwargs:
:type task_fn_kwargs:
:return:
:rtype:
"""
os.environ['CUDA_DEVICE_ORDER'] = "PCI_BUS_ID"
if auto_choose_gpu_flag is True:
DEVICE_ID_LIST = Gpu.getFirstAvailable()
DEVICE_ID = DEVICE_ID_LIST[0]
os.environ["CUDA_VISIBLE_DEVICES"] = str(DEVICE_ID)
else:
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
if not seed:
seed = int(round(time.time() * 1000)) % (2 ** 32 - 1)
_reset_global_seed(seed)
print("create log path at {}".format(GlobalConfig().DEFAULT_LOG_PATH), flush=True)
file.create_path(path=GlobalConfig().DEFAULT_LOG_PATH, del_if_existed=del_if_log_path_existed)
Logger().init(config_or_config_dict=dict(),
log_path=GlobalConfig().DEFAULT_LOG_PATH,
log_level=GlobalConfig().DEFAULT_LOG_LEVEL)
ConsoleLogger().init(to_file_flag=GlobalConfig().DEFAULT_WRITE_CONSOLE_LOG_TO_FILE_FLAG,
to_file_name=os.path.join(GlobalConfig().DEFAULT_LOG_PATH,
GlobalConfig().DEFAULT_CONSOLE_LOG_FILE_NAME),
level=GlobalConfig().DEFAULT_LOG_LEVEL,
logger_name=GlobalConfig().DEFAULT_CONSOLE_LOGGER_NAME)
task_fn(**task_fn_kwargs)
@typechecked
def duplicate_exp_runner(num, task_fn, auto_choose_gpu_flag=False, gpu_id: int = 0, seeds: list = None,
del_if_log_path_existed=False,
**task_fn_kwargs):
"""
:param num: the number of multiple experiments
:type num: int
:param task_fn: task function, defined by users
:type task_fn: method
:param auto_choose_gpu_flag: auto choose gpu, default False
:type auto_choose_gpu_flag: bool
:param gpu_id: gpu id, default 0
:type gpu_id: int
:param seeds: seeds generated by system time
:type seeds: list
:param del_if_log_path_existed: delete the existing log path, default False
:type del_if_log_path_existed: bool
:param task_fn_kwargs:
:type task_fn_kwargs:
:return:
:rtype:
"""
# tracemalloc.start(100)
if seeds:
assert len(seeds) == num
base_log_path = deepcopy(GlobalConfig().DEFAULT_LOG_PATH)
for i in range(num):
GlobalConfig().set('DEFAULT_LOG_PATH', os.path.join(base_log_path, 'exp_{}'.format(i)))
single_exp_runner(task_fn=task_fn, auto_choose_gpu_flag=auto_choose_gpu_flag,
del_if_log_path_existed=del_if_log_path_existed,
gpu_id=gpu_id, seed=seeds[i] if seeds else None, **task_fn_kwargs)
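# A minimal usage sketch: wrap the experiment-building code into a task function and hand
# it to the runners above; the log path below is only an example value.
#
#   def task_fn():
#       ...  # build env/agent/flow, create an Experiment and call exp.run()
#
#   GlobalConfig().set('DEFAULT_LOG_PATH', './log/demo')
#   single_exp_runner(task_fn, del_if_log_path_existed=True)
#   # or repeat the same task with different seeds:
#   # duplicate_exp_runner(num=3, task_fn=task_fn, seeds=[1, 2, 3])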
| 3,915 | 33.654867 | 117 | py |
baconian-project | baconian-project-master/baconian/core/config.py | import json_tricks as json
import os
import typeguard as tg
class Config(object):
def __init__(self, required_key_dict: dict, config_dict=None, cls_name=""):
self.cls_name = cls_name
self.required_key_dict = required_key_dict
if config_dict:
self.config_dict = config_dict
else:
self._config_dict = {}
@property
def config_dict(self):
return self._config_dict
@config_dict.setter
def config_dict(self, new_value):
if self.check_config(dict=new_value, key_dict=self.required_key_dict) is True:
for key, val in new_value.items():
if type(val) is list:
new_value[str(key)] = tuple(val)
self._config_dict = new_value
def save_config(self, path, name):
Config.save_to_json(dict=self.config_dict, path=path, file_name=name)
def load_config(self, path):
res = Config.load_json(file_path=path)
self.config_dict = res
def check_config(self, dict: dict, key_dict: dict) -> bool:
if self.check_dict_key(check_dict=dict, required_key_dict=key_dict):
return True
else:
return False
@staticmethod
def load_json(file_path):
with open(file_path, 'r') as f:
res = json.load(f)
return res
@staticmethod
def save_to_json(dict, path, file_name=None):
if file_name is not None:
path = os.path.join(path, file_name)
with open(path, 'w') as f:
json.dump(obj=dict, fp=f, indent=4, sort_keys=True)
def check_dict_key(self, check_dict: dict, required_key_dict: dict) -> bool:
for key, val in required_key_dict.items():
if not isinstance(check_dict, dict):
raise TypeError('{}: input check dict should be a dict instead of {}'.format(self.cls_name,
type(check_dict).__name__))
if key not in check_dict:
raise IndexError('{} Missing Key {}'.format(self.cls_name, key))
if isinstance(val, dict):
self.check_dict_key(check_dict=check_dict[key], required_key_dict=required_key_dict[key])
return True
def __call__(self, key):
if key not in self.config_dict:
raise KeyError('{} key {} not in the config'.format(self.cls_name, key))
else:
return self.config_dict[key]
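# A small usage sketch for Config: required keys are checked whenever the config dict is
# assigned, and values are read back by calling the instance; the file path is an example.
#
#   conf = Config(required_key_dict=dict(LEARNING_RATE=0.01, BATCH_SIZE=32), cls_name='demo')
#   conf.config_dict = dict(LEARNING_RATE=0.001, BATCH_SIZE=64)   # passes the key check
#   lr = conf('LEARNING_RATE')                                    # -> 0.001
#   conf.save_config(path='./log', name='demo_conf.json')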
| 2,503 | 34.771429 | 120 | py |
baconian-project | baconian-project-master/baconian/core/agent.py | from baconian.core.core import Basic, Env, EnvSpec
from baconian.envs.env_wrapper import Wrapper, ObservationWrapper, StepObservationWrapper
from baconian.common.sampler.sampler import Sampler
from baconian.common.error import *
from baconian.algo.algo import Algo
from typeguard import typechecked
from baconian.algo.misc import ExplorationStrategy
from baconian.common.sampler.sample_data import SampleData
from baconian.common.logging import Recorder, record_return_decorator
from baconian.core.status import StatusWithSubInfo
from baconian.core.status import register_counter_info_to_status_decorator
from baconian.core.util import init_func_arg_record_decorator
from baconian.common.logging import ConsoleLogger
from baconian.common.sampler.sample_data import TransitionData, TrajectoryData
from baconian.common.schedules import EventScheduler
from baconian.common.noise import AgentActionNoiseWrapper
from baconian.core.parameters import Parameters
class Agent(Basic):
STATUS_LIST = ('CREATED', 'INITED', 'TRAIN', 'TEST')
INIT_STATUS = 'CREATED'
required_key_dict = {}
@init_func_arg_record_decorator()
@typechecked
def __init__(self, name,
# config_or_config_dict: (DictConfig, dict),
env: (Env, Wrapper),
algo: Algo,
env_spec: EnvSpec,
sampler: Sampler = None,
noise_adder: AgentActionNoiseWrapper = None,
reset_noise_every_terminal_state=False,
reset_state_every_sample=False,
exploration_strategy: ExplorationStrategy = None,
algo_saving_scheduler: EventScheduler = None):
"""
:param name: the name of the agent instance
:type name: str
:param env: environment that interacts with agent
:type env: Env
:param algo: algorithm of the agent
:type algo: Algo
:param env_spec: environment specifications: action apace and environment space
:type env_spec: EnvSpec
:param sampler: sampler
:type sampler: Sampler
:param reset_noise_every_terminal_state: reset the noise every sampled trajectory
:type reset_noise_every_terminal_state: bool
        :param reset_state_every_sample: reset the state every time a sample/rollout is performed
:type reset_state_every_sample: bool
:param noise_adder: add action noise for exploration in action space
:type noise_adder: AgentActionNoiseWrapper
:param exploration_strategy: exploration strategy in action space
:type exploration_strategy: ExplorationStrategy
        :param algo_saving_scheduler: schedule that controls when to save the algorithm during the training process
        :type algo_saving_scheduler: EventScheduler
"""
super(Agent, self).__init__(name=name, status=StatusWithSubInfo(self))
self.parameters = Parameters(parameters=dict(reset_noise_every_terminal_state=reset_noise_every_terminal_state,
reset_state_every_sample=reset_state_every_sample))
self.env = env
self.algo = algo
self._env_step_count = 0
if sampler is None:
sampler = Sampler()
self.sampler = sampler
self.recorder = Recorder(default_obj=self)
self.env_spec = env_spec
if exploration_strategy:
assert isinstance(exploration_strategy, ExplorationStrategy)
self.explorations_strategy = exploration_strategy
else:
self.explorations_strategy = None
self.noise_adder = noise_adder
self.algo_saving_scheduler = algo_saving_scheduler
# @record_return_decorator(which_recorder='self')
@register_counter_info_to_status_decorator(increment=1, info_key='update_counter', under_status='TRAIN')
def train(self, *args, **kwargs):
"""
train the agent
        :return: True if the agent was trained successfully, False if the memory buffer did not have enough data.
:rtype: bool
"""
self.set_status('TRAIN')
self.algo.set_status('TRAIN')
ConsoleLogger().print('info', 'train agent:')
try:
res = self.algo.train(*args, **kwargs)
except MemoryBufferLessThanBatchSizeError as e:
ConsoleLogger().print('warning', 'memory buffer did not have enough data to train, skip training')
return False
ConsoleLogger().print('info', res)
if self.algo_saving_scheduler and self.algo_saving_scheduler.value() is True:
self.algo.save(global_step=self._status.get_specific_info_key_status(info_key='update_counter',
under_status='TRAIN'))
# @record_return_decorator(which_recorder='self')
def test(self, sample_count) -> SampleData:
"""
test the agent
:param sample_count: how many trajectories used to evaluate the agent's performance
:type sample_count: int
:return: SampleData object.
"""
self.set_status('TEST')
self.algo.set_status('TEST')
ConsoleLogger().print('info', 'test: agent with {} trajectories'.format(sample_count))
res = self.sample(env=self.env,
sample_count=sample_count,
sample_type='trajectory',
store_flag=False,
in_which_status='TEST')
return res
@register_counter_info_to_status_decorator(increment=1, info_key='predict_counter', under_status=('TRAIN', 'TEST'),
ignore_wrong_status=True)
def predict(self, **kwargs):
"""
predict the action given the state
:param kwargs: rest parameters, include key: obs
:return: predicted action
:rtype: numpy ndarray
"""
res = None
if self.explorations_strategy and not self.is_testing:
res = self.explorations_strategy.predict(**kwargs, algo=self.algo)
else:
if self.noise_adder and not self.is_testing:
res = self.env_spec.action_space.clip(self.noise_adder(self.algo.predict(**kwargs)))
else:
res = self.algo.predict(**kwargs)
self.recorder.append_to_obj_log(obj=self, attr_name='action', status_info=self.get_status(), value=res)
return res
@register_counter_info_to_status_decorator(increment=1, info_key='sample_counter', under_status=('TRAIN', 'TEST'),
ignore_wrong_status=True)
def sample(self, env, sample_count: int, in_which_status: str = 'TRAIN', store_flag=False,
sample_type: str = 'transition') -> (
TransitionData, TrajectoryData):
"""
sample a certain number of data from environment
:param env: environment to sample
:param sample_count: int, sample count
:param in_which_status: string, environment status
:param store_flag: to store environment samples or not, default False
:param sample_type: the type of sample, 'transition' by default
:return: sample data from environment
:rtype: some subclass of SampleData: TrajectoryData or TransitionData
"""
self.set_status(in_which_status)
env.set_status(in_which_status)
self.algo.set_status(in_which_status)
ConsoleLogger().print('info',
"agent sampled {} {} under status {}".format(sample_count, sample_type,
self.get_status()))
batch_data = self.sampler.sample(agent=self,
env=env,
reset_at_start=self.parameters('reset_state_every_sample'),
sample_type=sample_type,
sample_count=sample_count)
if store_flag is True:
self.store_samples(samples=batch_data)
# todo when we have transition/ trajectory data here, the mean or sum results are still valid?
ConsoleLogger().print('info',
"sample: mean reward {}, sum reward {}".format(
batch_data.get_mean_of(set_name='reward_set'),
batch_data.get_sum_of(set_name='reward_set')))
self.recorder.append_to_obj_log(obj=self, attr_name='average_reward', status_info=self.get_status(),
value=batch_data.get_mean_of('reward_set'))
self.recorder.append_to_obj_log(obj=self, attr_name='sum_reward', status_info=self.get_status(),
value=batch_data.get_sum_of('reward_set'))
return batch_data
def reset_on_terminal_state(self):
if self.parameters('reset_noise_every_terminal_state') is True and self.noise_adder is not None:
self.noise_adder.reset()
def init(self):
"""
Initialize the algorithm, and set status to 'INITED'.
"""
self.algo.init()
self.set_status('INITED')
self.algo.warm_up(trajectory_data=self.sampler.sample(env=self.env,
agent=self,
sample_type='trajectory',
reset_at_start=True,
sample_count=self.algo.warm_up_trajectories_number))
@typechecked
def store_samples(self, samples: SampleData):
"""
        store the samples into the memory/replay buffer if the algorithm held by the agent needs to do so, e.g., DQN, DDPG
:param samples: sample data of the experiment
:type samples: SampleData
"""
self.algo.append_to_memory(samples=samples)
@property
def is_training(self):
"""
Check whether the agent is training. Return a boolean value.
:return: true if the agent is training
:rtype: bool
"""
return self.get_status()['status'] == 'TRAIN'
@property
def is_testing(self):
"""
Check whether the agent is testing. Return a boolean value.
:return: true if the agent is testing
:rtype: bool
"""
return self.get_status()['status'] == 'TEST'
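# A schematic interaction loop, assuming `agent` and `env` were constructed with a concrete
# Algo and EnvSpec elsewhere; it only strings together the public methods defined above
# (sample -> train -> test) the way a Flow implementation would.
#
#   agent.init()
#   batch = agent.sample(env=env, sample_count=100, store_flag=True)   # fill the replay memory
#   agent.train()                                                      # returns False if memory is too small
#   test_data = agent.test(sample_count=5)                             # 5 evaluation trajectories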
| 10,580 | 45.004348 | 119 | py |
baconian-project | baconian-project-master/baconian/core/util.py | from copy import deepcopy
from collections import Hashable
from baconian.common.error import *
from baconian.core.global_var import get_all, reset
from functools import wraps
from baconian.config.global_config import GlobalConfig
def init_func_arg_record_decorator():
def wrap(fn):
@wraps(fn)
def wrap_with_self(self, *args, **kwargs):
get_all()['_global_obj_arg_dict'][self] = None
# get_all()['_global_obj_arg_dict'][self] = dict(args=args, kwargs=kwargs, cls=type(self))
res = fn(self, *args, **kwargs)
return res
return wrap_with_self
return wrap
def get_global_arg_dict():
return get_all()['_global_obj_arg_dict']
def copy_globally(arg_dict, source_obj_list):
"""
deprecated in the future
:param arg_dict:
:param source_obj_list:
:return:
"""
new_obj_list = []
reset('_global_name_dict')
for obj in source_obj_list:
if obj not in arg_dict:
raise ValueError('{} not in arg_dict'.format(obj))
else:
new_obj_list.append(_make_copy_object(arg_dict, obj=obj))
return new_obj_list
def register_name_globally(name: str, obj):
if name in get_all()['_global_name_dict'] and not id(obj) == id(
get_all()['_global_name_dict'][name]) and obj.allow_duplicate_name is False and \
get_all()['_global_name_dict'][
name].allow_duplicate_name is False and GlobalConfig().DEFAULT_TURN_OFF_GLOBAL_NAME_FLAG is False:
raise GlobalNameExistedError(
'name : {} is existed with object: {}'.format(name, get_all()['_global_name_dict'][name]))
else:
get_all()['_global_name_dict'][name] = obj
def _make_copy_object(arg_dict: dict, obj):
if obj not in arg_dict:
raise ValueError('{} not in arg_dict'.format(obj))
else:
args = arg_dict[obj]['args']
kwargs = arg_dict[obj]['kwargs']
cls = arg_dict[obj]['cls']
new_args = []
new_kwargs = dict()
arg_dict.pop(obj)
del obj
for a in args:
            if in_dict(a, arg_dict):
new_args.append(_make_copy_object(arg_dict, obj=a))
else:
new_args.append(deepcopy(a))
for key, a in kwargs.items():
print(key, a)
if in_dict(a, arg_dict):
new_kwargs[key] = _make_copy_object(arg_dict, obj=a)
else:
new_kwargs[key] = deepcopy(a)
print("create ", cls, new_args, new_kwargs, flush=True)
return cls(*new_args, **new_kwargs)
def in_dict(obj, list_or_dict):
if isinstance(obj, Hashable):
return obj in list_or_dict
else:
if isinstance(list_or_dict, (tuple, list)):
for key in list_or_dict:
if id(key) == id(obj):
return True
return False
elif isinstance(list_or_dict, dict):
for key, val in list_or_dict.items():
if id(key) == id(obj):
return True
return False
| 3,091 | 30.55102 | 114 | py |
baconian-project | baconian-project-master/baconian/core/parameters.py | from typeguard import typechecked
from baconian.config.dict_config import DictConfig
import abc
from baconian.common.logging import Logger
import baconian.common.files as files
import os
from baconian.common.schedules import Scheduler
from copy import deepcopy, copy
class Parameter(object):
# TODO
def __init__(self):
pass
class Parameters(object):
"""
    A class that handles all parameters of a certain RL algorithm; better support will be added in a future version.
    Currently, this is just a very simple implementation.
"""
@typechecked
def __init__(self, parameters: dict,
source_config: DictConfig = None,
name='parameters',
to_scheduler_param_tuple: tuple = None,
default_save_param_key=None):
self._parameters = parameters
self.name = name
self._source_config = source_config if source_config else DictConfig(required_key_dict=dict(),
config_dict=dict())
self.default_save_param_key = default_save_param_key
self._scheduler_info_dict = dict()
self.to_scheduler_param_list = to_scheduler_param_tuple
def __call__(self, key=None):
if key:
if key in self._scheduler_info_dict:
new_val = self._scheduler_info_dict[key]['scheduler'].value()
if key in self._parameters:
self._parameters[key] = new_val
else:
self._source_config.set(key, new_val)
return new_val
if isinstance(self._parameters, dict):
if key in self._parameters:
return self._parameters[key]
else:
return self._source_config(key)
else:
raise ValueError('parameters is not dict')
else:
raise KeyError('specific a key to call {}'.format(type(self).__name__))
def __getitem__(self, item):
return self.__call__(key=item)
def init(self):
if self.to_scheduler_param_list:
for val_dict in self.to_scheduler_param_list:
self.set_scheduler(**val_dict)
def copy_from(self, source_parameter):
if not isinstance(source_parameter, type(self)):
raise TypeError()
self._update_dict(source_dict=source_parameter._parameters,
target_dict=self._parameters)
self._source_config.config_dict = source_parameter._source_config.config_dict
self.default_save_param_key = copy(source_parameter.default_save_param_key)
if source_parameter.to_scheduler_param_list:
self._scheduler_info_dict = dict()
self.to_scheduler_param_list = copy(source_parameter.to_scheduler_param_list)
if self.to_scheduler_param_list:
for val_dict in self.to_scheduler_param_list:
self.set_scheduler(**val_dict)
def _update_dict(self, source_dict: dict, target_dict: dict):
for key, val in source_dict.items():
target_dict[key] = val
def save(self, save_path, global_step, name=None, default_save_param=None, *args, **kwargs):
if default_save_param is None:
default_save_param = dict(_parameters=self._parameters, _source_config=self._source_config.config_dict)
if not name:
name = self.name
Logger().out_to_file(file_path=save_path,
file_name='{}-{}.json'.format(name, global_step),
content=default_save_param)
def load(self, load_path, name, global_step, *args, **kwargs):
res = files.load_json(file_path=os.path.join(load_path, "{}-{}.json".format(name, global_step)))
# todo this mapping can be done via a dict structure
if '_parameters' in res:
setattr(self, '_parameters', res['_parameters'])
if '_source_config' in res:
setattr(self._source_config, 'config_dict', res['_source_config'])
@typechecked
def set_scheduler(self, param_key: str, scheduler: Scheduler, **kwargs):
ori_value = self(param_key)
scheduler.initial_p = ori_value
self._scheduler_info_dict[param_key] = dict(param_key=param_key, scheduler=scheduler)
def update(self, *args, **kwargs):
for key, val in self._scheduler_info_dict.items():
self.set(key=val['param_key'],
new_val=val['scheduler'].value())
def set(self, key, new_val):
if not isinstance(new_val, type(self(key))):
raise TypeError('new value of parameters {} should be type {} instead of {}'.format(key, type(self(key)),
type(new_val)))
elif key in self._parameters:
self._parameters[key] = new_val
else:
self._source_config.set(key, new_val)
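# A minimal usage sketch for Parameters: plain values are read by calling the instance,
# and a parameter can be tied to a Scheduler so that its value follows a schedule;
# `some_scheduler` stands for any concrete Scheduler instance from baconian.common.schedules.
#
#   params = Parameters(parameters=dict(LEARNING_RATE=0.01, GAMMA=0.99))
#   lr = params('LEARNING_RATE')                                       # -> 0.01
#   params.set_scheduler(param_key='LEARNING_RATE', scheduler=some_scheduler)
#   lr = params('LEARNING_RATE')                                       # -> value given by the scheduler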
| 5,012 | 41.12605 | 117 | py |
baconian-project | baconian-project-master/baconian/core/__init__.py | # from mobrl.core.config import Config
# from mobrl.core.pipeline import Pipeline
# from mobrl.core.global_config import GlobalConfig
# from mobrl.core.basic import Basic
| 171 | 33.4 | 51 | py |
baconian-project | baconian-project-master/baconian/core/ensemble.py | from baconian.core.core import Basic, Env
from baconian.algo.dynamics.dynamics_model import DynamicsModel, DynamicsEnvWrapper
from baconian.algo.algo import Algo
import numpy as np
import abc
class Ensemble(Basic):
def init(self, *args, **kwargs):
raise NotImplementedError
def save(self, *args, **kwargs):
raise NotImplementedError
def load(self, *args, **kwargs):
raise NotImplementedError
class ModelEnsemble(Ensemble, DynamicsModel):
def __init__(self, model, n_models=1, prediction_type='random', *args, **kwargs):
"""
:param model:
:param n_models:
:param prediction_type:
"""
super().__init__(*args, **kwargs)
self._prediction_type = prediction_type
self._observations = list()
self._model = list()
self._name = kwargs.pop('name', 'dynamics_model')
if isinstance(model, DynamicsModel):
for a in range(n_models):
self._model.append(model.make_copy())
else:
base_env_spec = model[0].env_spec
for b in range(len(model)):
if model[b].env_spec != base_env_spec:
raise TypeError('EnvSpec of list of models do not match.')
self._model = model
self.state = None
def train(self, *args, **kwargs):
res = {}
for idx in range(len(self._model)):
res['model_{}'.format(idx)] = self._model[idx].train(*args, **kwargs)
return res
def reset_state(self, *args, **kwargs):
"""
Reset the model parameters.
"""
self._observations = list()
for m in self.model:
m.reset_state(*args, **kwargs)
self._observations.append(m.state)
if self._prediction_type == 'mean':
self.state = np.mean(self._observations, axis=0)
elif self._prediction_type == 'random':
self.state = self._observations[np.random.randint(low=0, high=len(self.model))]
else:
raise ValueError
def init(self, *args, **kwargs):
for m in self.model:
m.init(*args, **kwargs)
self.reset_state()
def step(self, *args, **kwargs):
self._observations = list()
for m in self.model:
self._observations.append(m.step(*args, **kwargs))
if self._prediction_type == 'mean':
results = np.mean(self._observations, axis=0)
elif self._prediction_type == 'random':
results = self._observations[np.random.randint(low=0, high=len(self.model))]
else:
raise ValueError
return results
def get_obs(self):
return self._observations
@abc.abstractmethod
def _state_transit(self, state, action, **kwargs) -> np.ndarray:
raise NotImplementedError
def copy_from(self, obj) -> bool:
if not isinstance(obj, type(self)):
raise TypeError('Wrong type of obj %s to be copied, which should be %s' % (type(obj), type(self)))
return True
def make_copy(self):
raise NotImplementedError
def return_as_env(self) -> Env:
return DynamicsEnvWrapper(dynamics=self,
name=self._name + '_env')
def save(self, *args, **kwargs):
for model in self._model:
model.save(*args, **kwargs)
def load(self, *args, **kwargs):
raise NotImplementedError
@property
def model(self):
"""
Returns:
The list of the models in the ensemble.
"""
return self._model
def __len__(self):
return len(self._model)
def __getitem__(self, idx):
return self._model[idx]
class AlgoEnsemble(Ensemble, Algo):
def __init__(self, cls, n_algos, prediction_type='random', *args, **kwargs):
super().__init__(*args, **kwargs)
self._cls = cls
self._n_algos = n_algos
self._prediction_type = prediction_type
self._predictions = list()
self._algo = list()
self._name = kwargs.pop('name', 'algo')
for num in range(n_algos):
            self._algo.append(cls(*args, **dict(kwargs, name=self._name + '_' + str(num))))
def init(self, *args, **kwargs):
raise NotImplementedError
def save(self, *args, **kwargs):
raise NotImplementedError
def load(self, *args, **kwargs):
raise NotImplementedError
@property
def algo(self):
"""
Returns:
            The list of the algorithms in the ensemble.
"""
return self._algo
def __len__(self):
return len(self._algo)
def __getitem__(self, idx):
return self._algo[idx]
| 4,729 | 26.988166 | 110 | py |
baconian-project | baconian-project-master/baconian/core/tuner.py | from baconian.common.logging import Recorder
class Tuner(object):
"""
Auto hyper parameter tuning module, tobe done
"""
def __init__(self):
self.recorder = Recorder(default_obj=self)
| 209 | 20 | 50 | py |
baconian-project | baconian-project-master/baconian/core/global_var.py | _global_obj_arg_dict = {}
_global_name_dict = {}
assert id(_global_obj_arg_dict) == id(globals()['_global_obj_arg_dict']) == id(locals()['_global_obj_arg_dict'])
assert id(_global_name_dict) == id(globals()['_global_name_dict']) == id(locals()['_global_name_dict'])
def reset_all():
globals()['_global_obj_arg_dict'] = {}
globals()['_global_name_dict'] = {}
def reset(key: str):
globals()[key] = {}
def get_all() -> dict:
return dict(
_global_obj_arg_dict=globals()['_global_obj_arg_dict'],
_global_name_dict=globals()['_global_name_dict']
)
| 586 | 24.521739 | 112 | py |
baconian-project | baconian-project-master/baconian/core/flow/dyna_flow.py | from baconian.core.flow.train_test_flow import Flow
from baconian.config.global_config import GlobalConfig
from baconian.common.logging import ConsoleLogger
from baconian.config.dict_config import DictConfig
from baconian.common.misc import *
from baconian.core.parameters import Parameters
from baconian.core.status import *
class DynaFlow(Flow):
"""
    A typical flow for utilizing a model-based algorithm; it is not restricted to Dyna-style algorithms,
    but can be utilized by others.
"""
required_func = ('train_algo', 'train_algo_from_synthesized_data', 'train_dynamics', 'test_algo', 'test_dynamics',
'sample_from_real_env', 'sample_from_dynamics_env')
required_key_dict = {
"TEST_ALGO_EVERY_REAL_SAMPLE_COUNT": 1000,
"TEST_DYNAMICS_EVERY_REAL_SAMPLE_COUNT": 1000,
"TRAIN_ALGO_EVERY_REAL_SAMPLE_COUNT_FROM_REAL_ENV": 1000,
"TRAIN_ALGO_EVERY_REAL_SAMPLE_COUNT_FROM_DYNAMICS_ENV": 1000,
"TRAIN_DYNAMICS_EVERY_REAL_SAMPLE_COUNT": 1000,
"START_TRAIN_ALGO_AFTER_SAMPLE_COUNT": 1,
"START_TRAIN_DYNAMICS_AFTER_SAMPLE_COUNT": 1,
"START_TEST_ALGO_AFTER_SAMPLE_COUNT": 1,
"START_TEST_DYNAMICS_AFTER_SAMPLE_COUNT": 1,
"WARM_UP_DYNAMICS_SAMPLES": 1000
}
def __init__(self,
train_sample_count_func,
config_or_config_dict: (DictConfig, dict),
func_dict: dict, ):
"""
        :param train_sample_count_func: a function that returns how many training samples the agent has collected so far.
        :type train_sample_count_func: method
        :param config_or_config_dict: a Config or a dict that should have the keys listed in DynaFlow.required_key_dict
        :type config_or_config_dict: Config or dict
        :param func_dict: function dict that holds the keys listed in DynaFlow.required_func; each item in the dict should also be a dict holding the keys 'func', 'args', 'kwargs'
        :type func_dict: dict
"""
        super(DynaFlow, self).__init__(func_dict=func_dict)
config = construct_dict_config(config_or_config_dict, obj=self)
self.parameters = Parameters(source_config=config, parameters=dict())
self.time_step_func = train_sample_count_func
self._last_train_algo_point = -1
self._last_train_algo_point_from_dynamics = -1
self._last_test_algo_point = -1
self._last_train_dynamics_point = -1
self._last_test_dynamics_point = -1
assert callable(train_sample_count_func)
def _launch(self) -> bool:
"""
        Launch the flow until it finishes or catches a system-allowed error
        (e.g., out of GPU memory), to ensure the log will be saved safely.
:return: True if the flow correctly executed and finished
:rtype: bool
"""
while True:
real_batch_data = self._call_func('sample_from_real_env')
if self.time_step_func() - self.parameters(
'TRAIN_ALGO_EVERY_REAL_SAMPLE_COUNT_FROM_REAL_ENV') >= self._last_train_algo_point and self.time_step_func() > self.parameters(
'START_TRAIN_ALGO_AFTER_SAMPLE_COUNT'):
self._last_train_algo_point = self.time_step_func()
self._call_func('train_algo')
if self.time_step_func() - self.parameters(
'TRAIN_ALGO_EVERY_REAL_SAMPLE_COUNT_FROM_DYNAMICS_ENV') >= self._last_train_algo_point_from_dynamics and self.time_step_func() > self.parameters(
'START_TRAIN_ALGO_AFTER_SAMPLE_COUNT') and self.time_step_func() >= self.parameters(
'WARM_UP_DYNAMICS_SAMPLES'):
batch_data = self._call_func('sample_from_dynamics_env')
self._call_func('train_algo_from_synthesized_data', batch_data=batch_data)
self._last_train_algo_point_from_dynamics = self.time_step_func()
if self.time_step_func() - self.parameters(
'TRAIN_DYNAMICS_EVERY_REAL_SAMPLE_COUNT') >= self._last_train_dynamics_point and self.time_step_func() > self.parameters(
'START_TRAIN_DYNAMICS_AFTER_SAMPLE_COUNT'):
                self._last_train_dynamics_point = self.time_step_func()
self._call_func('train_dynamics', batch_data=real_batch_data)
if self.time_step_func() - self.parameters(
'TEST_ALGO_EVERY_REAL_SAMPLE_COUNT') >= self._last_test_algo_point and self.time_step_func() > self.parameters(
'START_TEST_ALGO_AFTER_SAMPLE_COUNT'):
self._last_test_algo_point = self.time_step_func()
self._call_func('test_algo')
if self.time_step_func() - self.parameters(
'TEST_DYNAMICS_EVERY_REAL_SAMPLE_COUNT') >= self._last_test_dynamics_point and self.time_step_func() > self.parameters(
'START_TEST_DYNAMICS_AFTER_SAMPLE_COUNT'):
self._last_test_dynamics_point = self.time_step_func()
self._call_func('test_dynamics')
if self._is_ended() is True:
break
return True
def _is_ended(self):
"""
:return: True if an experiment is ended
:rtype: bool
"""
key_founded_flag = False
finished_flag = False
for key in GlobalConfig().DEFAULT_EXPERIMENT_END_POINT:
if GlobalConfig().DEFAULT_EXPERIMENT_END_POINT[key] is not None:
key_founded_flag = True
if get_global_status_collect()(key) >= GlobalConfig().DEFAULT_EXPERIMENT_END_POINT[key]:
ConsoleLogger().print('info',
'pipeline ended because {}: {} >= end point value {}'.
format(key, get_global_status_collect()(key),
GlobalConfig().DEFAULT_EXPERIMENT_END_POINT[key]))
finished_flag = True
if key_founded_flag is False:
ConsoleLogger().print(
'warning',
'{} in experiment_end_point is not registered with global status collector: {}, experiment may not end'.
format(GlobalConfig().DEFAULT_EXPERIMENT_END_POINT, list(get_global_status_collect()().keys())))
return finished_flag
def create_dyna_flow(train_algo_func, train_algo_from_synthesized_data_func,
train_dynamics_func, test_algo_func, test_dynamics_func, sample_from_real_env_func,
sample_from_dynamics_env_func,
test_algo_every_real_sample_count,
test_dynamics_every_real_sample_count,
train_algo_every_real_sample_count_by_data_from_real_env,
train_algo_every_real_sample_count_by_data_from_dynamics_env,
train_dynamics_ever_real_sample_count,
start_train_algo_after_sample_count,
start_train_dynamics_after_sample_count,
start_test_dynamics_after_sample_count,
start_test_algo_after_sample_count,
warm_up_dynamics_samples,
train_samples_counter_func=None):
config_dict = dict(
TRAIN_ALGO_EVERY_REAL_SAMPLE_COUNT_FROM_REAL_ENV=train_algo_every_real_sample_count_by_data_from_real_env,
TRAIN_ALGO_EVERY_REAL_SAMPLE_COUNT_FROM_DYNAMICS_ENV=train_algo_every_real_sample_count_by_data_from_dynamics_env,
TEST_ALGO_EVERY_REAL_SAMPLE_COUNT=test_algo_every_real_sample_count,
TEST_DYNAMICS_EVERY_REAL_SAMPLE_COUNT=test_dynamics_every_real_sample_count,
TRAIN_DYNAMICS_EVERY_REAL_SAMPLE_COUNT=train_dynamics_ever_real_sample_count,
START_TRAIN_ALGO_AFTER_SAMPLE_COUNT=start_train_algo_after_sample_count,
START_TRAIN_DYNAMICS_AFTER_SAMPLE_COUNT=start_train_dynamics_after_sample_count,
START_TEST_ALGO_AFTER_SAMPLE_COUNT=start_test_algo_after_sample_count,
START_TEST_DYNAMICS_AFTER_SAMPLE_COUNT=start_test_dynamics_after_sample_count,
WARM_UP_DYNAMICS_SAMPLES=warm_up_dynamics_samples,
)
def return_func_dict(s_dict):
return dict(func=s_dict[0],
args=s_dict[1],
kwargs=s_dict[2])
func_dict = dict(
train_algo=return_func_dict(train_algo_func),
train_algo_from_synthesized_data=return_func_dict(train_algo_from_synthesized_data_func),
train_dynamics=return_func_dict(train_dynamics_func),
test_algo=return_func_dict(test_algo_func),
test_dynamics=return_func_dict(test_dynamics_func),
sample_from_real_env=return_func_dict(sample_from_real_env_func),
sample_from_dynamics_env=return_func_dict(sample_from_dynamics_env_func),
)
if train_samples_counter_func is None:
def default_train_samples_counter_func():
return get_global_status_collect()('TOTAL_AGENT_TRAIN_SAMPLE_COUNT')
train_samples_counter_func = default_train_samples_counter_func
return DynaFlow(config_or_config_dict=config_dict,
train_sample_count_func=train_samples_counter_func,
func_dict=func_dict)
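# A schematic sketch of calling create_dyna_flow, assuming an `agent` (holding a model-based
# algo) and a dynamics environment `dynamics_env` were built elsewhere; `train_dynamics_fn`
# and `test_dynamics_fn` stand for whatever callables the chosen algorithm exposes for
# fitting/evaluating its dynamics model, and all counts are arbitrary example values.
#
#   flow = create_dyna_flow(
#       train_algo_func=(agent.train, (), dict()),
#       train_algo_from_synthesized_data_func=(agent.train, (), dict()),
#       train_dynamics_func=(train_dynamics_fn, (), dict()),
#       test_algo_func=(agent.test, (), dict(sample_count=1)),
#       test_dynamics_func=(test_dynamics_fn, (), dict()),
#       sample_from_real_env_func=(agent.sample, (), dict(sample_count=10, env=agent.env, store_flag=True)),
#       sample_from_dynamics_env_func=(agent.sample, (), dict(sample_count=10, env=dynamics_env, store_flag=True)),
#       test_algo_every_real_sample_count=10,
#       test_dynamics_every_real_sample_count=10,
#       train_algo_every_real_sample_count_by_data_from_real_env=10,
#       train_algo_every_real_sample_count_by_data_from_dynamics_env=10,
#       train_dynamics_ever_real_sample_count=10,
#       start_train_algo_after_sample_count=1,
#       start_train_dynamics_after_sample_count=1,
#       start_test_algo_after_sample_count=1,
#       start_test_dynamics_after_sample_count=1,
#       warm_up_dynamics_samples=100)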
| 9,349 | 51.52809 | 193 | py |
baconian-project | baconian-project-master/baconian/core/flow/me_ppo_flow.py | from baconian.core.flow.train_test_flow import Flow
from baconian.config.global_config import GlobalConfig
from baconian.common.logging import ConsoleLogger
from baconian.config.dict_config import DictConfig
from baconian.common.misc import *
from baconian.core.parameters import Parameters
from baconian.core.status import *
class MEPPO_Flow(Flow):
"""
    A typical flow for utilizing a model-based algorithm; it is not restricted to Dyna-style algorithms,
    but can be utilized by others.
"""
required_func = ('train_algo', 'train_algo_from_synthesized_data', 'train_dynamics', 'test_algo', 'test_dynamics',
'sample_from_real_env', 'sample_from_dynamics_env', 'validate_policy_on_ensemble')
required_key_dict = {
"TEST_ALGO_EVERY_REAL_SAMPLE_COUNT": 1000,
"TEST_DYNAMICS_EVERY_REAL_SAMPLE_COUNT": 1000,
"TRAIN_ALGO_EVERY_REAL_SAMPLE_COUNT_FROM_REAL_ENV": 1000,
"TRAIN_ALGO_EVERY_REAL_SAMPLE_COUNT_FROM_DYNAMICS_ENV": 1000,
"TRAIN_DYNAMICS_EVERY_REAL_SAMPLE_COUNT": 1000,
"START_TRAIN_ALGO_AFTER_SAMPLE_COUNT": 1,
"START_TRAIN_DYNAMICS_AFTER_SAMPLE_COUNT": 1,
"START_TEST_ALGO_AFTER_SAMPLE_COUNT": 1,
"START_TEST_DYNAMICS_AFTER_SAMPLE_COUNT": 1,
"WARM_UP_DYNAMICS_SAMPLES": 1000,
"VALIDATION_THRESHOLD": 0.7,
"VALIDATION_EVERY_FICTITIOUS_SET": 5,
"SAMPLE_BEYOND_STOP_IMPROVEMENT": 10,
}
def __init__(self,
train_sample_count_func,
config_or_config_dict: (DictConfig, dict),
func_dict: dict, ):
        super(MEPPO_Flow, self).__init__(func_dict=func_dict)
config = construct_dict_config(config_or_config_dict, obj=self)
self.parameters = Parameters(source_config=config, parameters=dict())
self.time_step_func = train_sample_count_func
self._last_train_algo_point = -1
self._start_train_algo_point_from_dynamics = -1
self._last_test_algo_point = -1
self._start_train_dynamics_point = -1
self._last_test_dynamics_point = -1
self._last_performance = 0
self._last_chance = 0
self._fictitious_set_count = 0
assert callable(train_sample_count_func)
def _launch(self) -> bool:
while True:
if self._is_ended() is True:
break
real_batch_data = self._call_func('sample_from_real_env')
if self.time_step_func() - self._start_train_dynamics_point <= self.parameters(
'TRAIN_DYNAMICS_EVERY_REAL_SAMPLE_COUNT') and \
self.time_step_func() > self.parameters('START_TRAIN_DYNAMICS_AFTER_SAMPLE_COUNT'):
self._last_train_algo_point = self.time_step_func()
self._call_func('train_dynamics', batch_data=real_batch_data)
if self.time_step_func() <= self.parameters('WARM_UP_DYNAMICS_SAMPLES'):
continue
else:
while True:
if self.time_step_func() - self._start_train_algo_point_from_dynamics <= self.parameters(
'TRAIN_ALGO_EVERY_REAL_SAMPLE_COUNT_FROM_DYNAMICS_ENV') and \
self.time_step_func() > self.parameters(
'START_TRAIN_ALGO_AFTER_SAMPLE_COUNT'):
batch_data = self._call_func('sample_from_dynamics_env')
self._call_func('train_algo_from_synthesized_data', batch_data=batch_data)
else:
self._fictitious_set_count += 1
self._last_chance += 1
if self._fictitious_set_count >= self.parameters('VALIDATION_EVERY_FICTITIOUS_SET'):
performance = self._call_func('validate_policy_on_ensemble')
if performance < self.parameters('VALIDATION_THRESHOLD'):
                                if self._last_chance > self.parameters('SAMPLE_BEYOND_STOP_IMPROVEMENT'):
self._last_chance = 0
self._start_train_dynamics_point = self.time_step_func()
break
self._start_train_algo_point_from_dynamics = self.time_step_func()
return True
def _is_ended(self):
key_founded_flag = False
finished_flag = False
for key in GlobalConfig().DEFAULT_EXPERIMENT_END_POINT:
if GlobalConfig().DEFAULT_EXPERIMENT_END_POINT[key] is not None:
key_founded_flag = True
if get_global_status_collect()(key) >= GlobalConfig().DEFAULT_EXPERIMENT_END_POINT[key]:
ConsoleLogger().print('info',
'pipeline ended because {}: {} >= end point value {}'.
format(key, get_global_status_collect()(key),
GlobalConfig().DEFAULT_EXPERIMENT_END_POINT[key]))
finished_flag = True
if key_founded_flag is False:
ConsoleLogger().print(
'warning',
'{} in experiment_end_point is not registered with global status collector: {}, experiment may not end'.
format(GlobalConfig().DEFAULT_EXPERIMENT_END_POINT, list(get_global_status_collect()().keys())))
return finished_flag
def create_meppo_flow(train_algo_func, train_algo_from_synthesized_data_func,
train_dynamics_func, test_algo_func, test_dynamics_func, sample_from_real_env_func,
sample_from_dynamics_env_func,
validate_policy_on_ensemble_func,
test_algo_every_real_sample_count,
test_dynamics_every_real_sample_count,
train_algo_every_real_sample_count_by_data_from_real_env,
train_algo_every_real_sample_count_by_data_from_dynamics_env,
train_dynamics_every_real_sample_count,
start_train_algo_after_sample_count,
start_train_dynamics_after_sample_count,
start_test_dynamics_after_sample_count,
start_test_algo_after_sample_count,
warm_up_dynamics_samples,
validation_threshold,
validation_every_fictitious_set,
sample_beyond_stop_improvement,
train_samples_counter_func=None):
config_dict = dict(
TRAIN_ALGO_EVERY_REAL_SAMPLE_COUNT_FROM_REAL_ENV=train_algo_every_real_sample_count_by_data_from_real_env,
TRAIN_ALGO_EVERY_REAL_SAMPLE_COUNT_FROM_DYNAMICS_ENV=train_algo_every_real_sample_count_by_data_from_dynamics_env,
TEST_ALGO_EVERY_REAL_SAMPLE_COUNT=test_algo_every_real_sample_count,
TEST_DYNAMICS_EVERY_REAL_SAMPLE_COUNT=test_dynamics_every_real_sample_count,
TRAIN_DYNAMICS_EVERY_REAL_SAMPLE_COUNT=train_dynamics_every_real_sample_count,
START_TRAIN_ALGO_AFTER_SAMPLE_COUNT=start_train_algo_after_sample_count,
START_TRAIN_DYNAMICS_AFTER_SAMPLE_COUNT=start_train_dynamics_after_sample_count,
START_TEST_ALGO_AFTER_SAMPLE_COUNT=start_test_algo_after_sample_count,
START_TEST_DYNAMICS_AFTER_SAMPLE_COUNT=start_test_dynamics_after_sample_count,
WARM_UP_DYNAMICS_SAMPLES=warm_up_dynamics_samples,
VALIDATION_THRESHOLD=validation_threshold,
VALIDATION_EVERY_FICTITIOUS_SET=validation_every_fictitious_set,
SAMPLE_BEYOND_STOP_IMPROVEMENT=sample_beyond_stop_improvement,
)
def return_func_dict(s_dict):
return dict(func=s_dict[0],
args=s_dict[1],
kwargs=s_dict[2])
func_dict = dict(
train_algo=return_func_dict(train_algo_func),
train_algo_from_synthesized_data=return_func_dict(train_algo_from_synthesized_data_func),
train_dynamics=return_func_dict(train_dynamics_func),
test_algo=return_func_dict(test_algo_func),
test_dynamics=return_func_dict(test_dynamics_func),
sample_from_real_env=return_func_dict(sample_from_real_env_func),
sample_from_dynamics_env=return_func_dict(sample_from_dynamics_env_func),
validate_policy_on_ensemble=return_func_dict(validate_policy_on_ensemble_func)
)
if train_samples_counter_func is None:
def default_train_samples_counter_func():
return get_global_status_collect()('TOTAL_AGENT_TRAIN_SAMPLE_COUNT')
train_samples_counter_func = default_train_samples_counter_func
return MEPPO_Flow(config_or_config_dict=config_dict,
train_sample_count_func=train_samples_counter_func,
func_dict=func_dict)
| 8,906 | 50.485549 | 122 | py |
baconian-project | baconian-project-master/baconian/core/flow/__init__.py | 0 | 0 | 0 | py |
|
baconian-project | baconian-project-master/baconian/core/flow/train_test_flow.py | import abc
from baconian.config.global_config import GlobalConfig
from baconian.common.logging import ConsoleLogger
from baconian.config.dict_config import DictConfig
from baconian.common.misc import *
from baconian.core.parameters import Parameters
from baconian.core.status import *
from baconian.common.error import *
class Flow(object):
"""
    Interface of the experiment flow module; it defines the workflow of the reinforcement learning experiments.
"""
required_func = ()
required_key_dict = dict()
def __init__(self, func_dict):
"""
Constructor for Flow.
        :param func_dict: the functions and their arguments that will be called in the Flow
:type func_dict: dict
"""
self.func_dict = func_dict
for key in self.required_func:
if key not in func_dict:
raise MissedConfigError('miss key {}'.format(key))
def launch(self) -> bool:
"""
        Launch the flow until it finishes or catches a system-allowed error (e.g., out of GPU memory), to ensure the log will be saved safely.
:return: True if the flow correctly executed and finished
:rtype: bool
"""
try:
return self._launch()
except GlobalConfig().DEFAULT_ALLOWED_EXCEPTION_OR_ERROR_LIST as e:
ConsoleLogger().print('error', 'error {} occurred'.format(e))
return False
def _launch(self) -> bool:
"""
Abstract method to be implemented by subclass for a certain workflow.
:return: True if the flow correctly executed and finished
:rtype: bool
"""
raise NotImplementedError
def _call_func(self, key, **extra_kwargs):
"""
Call a function that is pre-defined in self.func_dict
:param key: name of the function, e.g., train, test, sample.
:type key: str
:param extra_kwargs: some extra kwargs you may want to be passed in the function calling
:return: actual return value of the called function if self.func_dict has such function otherwise None.
:rtype:
"""
if self.func_dict[key]:
return self.func_dict[key]['func'](*self.func_dict[key]['args'],
**extra_kwargs,
**self.func_dict[key]['kwargs'])
else:
return None
class TrainTestFlow(Flow):
"""
    A typical sampling-training-testing workflow, used by most model-free/model-based reinforcement
    learning methods. Typically, it repeats the cycle: sampling (saving to memory if off-policy) -> training (from memory if
    off-policy, from samples if on-policy) -> testing.
"""
required_func = ('train', 'test', 'sample')
required_key_dict = {
"TEST_EVERY_SAMPLE_COUNT": 1000,
"TRAIN_EVERY_SAMPLE_COUNT": 1000,
"START_TRAIN_AFTER_SAMPLE_COUNT": 1,
"START_TEST_AFTER_SAMPLE_COUNT": 1,
}
def __init__(self,
train_sample_count_func,
config_or_config_dict: (DictConfig, dict),
func_dict: dict,
):
"""
Constructor of TrainTestFlow
        :param train_sample_count_func: a function that returns how many training samples the agent has collected so far.
        :type train_sample_count_func: method
        :param config_or_config_dict: a Config or a dict that should have the keys: (TEST_EVERY_SAMPLE_COUNT, TRAIN_EVERY_SAMPLE_COUNT, START_TRAIN_AFTER_SAMPLE_COUNT, START_TEST_AFTER_SAMPLE_COUNT)
:type config_or_config_dict: Config or dict
:param func_dict: function dict, holds the keys: 'sample', 'train', 'test'. each item in the dict as also should be a dict, holds the keys 'func', 'args', 'kwargs'
:type func_dict: dict
"""
super(TrainTestFlow, self).__init__(func_dict=func_dict)
config = construct_dict_config(config_or_config_dict, obj=self)
self.parameters = Parameters(source_config=config, parameters=dict())
self.time_step_func = train_sample_count_func
self.last_train_point = -1
self.last_test_point = -1
assert callable(train_sample_count_func)
def _launch(self) -> bool:
"""
        Launch the flow until it finishes or catches a system-allowed error
        (e.g., out of GPU memory), to ensure the log will be saved safely.
:return: True if the flow correctly executed and finished
:rtype: bool
"""
while True:
self._call_func('sample')
if self.time_step_func() - self.parameters('TRAIN_EVERY_SAMPLE_COUNT') >= self.last_train_point and \
self.time_step_func() > self.parameters('START_TRAIN_AFTER_SAMPLE_COUNT'):
self.last_train_point = self.time_step_func()
self._call_func('train')
if self.time_step_func() - self.parameters('TEST_EVERY_SAMPLE_COUNT') >= self.last_test_point and \
self.time_step_func() > self.parameters('START_TEST_AFTER_SAMPLE_COUNT'):
self.last_test_point = self.time_step_func()
self._call_func('test')
if self._is_ended() is True:
break
return True
def _is_ended(self):
"""
:return: True if an experiment is ended
:rtype: bool
"""
key_founded_flag = False
finished_flag = False
for key in GlobalConfig().DEFAULT_EXPERIMENT_END_POINT:
if GlobalConfig().DEFAULT_EXPERIMENT_END_POINT[key] is not None:
key_founded_flag = True
if get_global_status_collect()(key) >= GlobalConfig().DEFAULT_EXPERIMENT_END_POINT[key]:
ConsoleLogger().print('info',
'pipeline ended because {}: {} >= end point value {}'.
format(key, get_global_status_collect()(key),
GlobalConfig().DEFAULT_EXPERIMENT_END_POINT[key]))
finished_flag = True
if key_founded_flag is False:
ConsoleLogger().print(
'warning',
'{} in experiment_end_point is not registered with global status collector: {}, experiment may not end'.
format(GlobalConfig().DEFAULT_EXPERIMENT_END_POINT, list(get_global_status_collect()().keys())))
return finished_flag
def create_train_test_flow(test_every_sample_count, train_every_sample_count, start_train_after_sample_count,
start_test_after_sample_count, train_func_and_args, test_func_and_args, sample_func_and_args,
train_samples_counter_func=None):
config_dict = dict(
TEST_EVERY_SAMPLE_COUNT=test_every_sample_count,
TRAIN_EVERY_SAMPLE_COUNT=train_every_sample_count,
START_TRAIN_AFTER_SAMPLE_COUNT=start_train_after_sample_count,
START_TEST_AFTER_SAMPLE_COUNT=start_test_after_sample_count,
)
def return_func_dict(s_dict):
return dict(func=s_dict[0],
args=s_dict[1],
kwargs=s_dict[2])
func_dict = dict(
train=return_func_dict(train_func_and_args),
test=return_func_dict(test_func_and_args),
sample=return_func_dict(sample_func_and_args),
)
if train_samples_counter_func is None:
def default_train_samples_counter_func():
return get_global_status_collect()('TOTAL_AGENT_TRAIN_SAMPLE_COUNT')
train_samples_counter_func = default_train_samples_counter_func
return TrainTestFlow(config_or_config_dict=config_dict,
train_sample_count_func=train_samples_counter_func,
func_dict=func_dict)
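# A minimal usage sketch for create_train_test_flow, assuming an `agent` built elsewhere;
# each *_func_and_args argument is a (callable, args, kwargs) triple and the counts below
# are arbitrary example values.
#
#   flow = create_train_test_flow(
#       test_every_sample_count=10,
#       train_every_sample_count=10,
#       start_test_after_sample_count=5,
#       start_train_after_sample_count=5,
#       train_func_and_args=(agent.train, (), dict()),
#       test_func_and_args=(agent.test, (), dict(sample_count=1)),
#       sample_func_and_args=(agent.sample, (), dict(sample_count=100,
#                                                    env=agent.env,
#                                                    store_flag=True)))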
| 7,842 | 41.394595 | 193 | py |
baconian-project | baconian-project-master/baconian/envs/envs_reward_func.py | """
Reward functions are a tricky problem for most model-based methods,
since the model itself requires a reward function to be supplied from outside.
Many codebases tune the definition of the reward function so that it differs from the original one
without explicit notice, which makes it hard for users to tune the algorithms
independently of the effect brought by the reward functions.
In Baconian, we try to clarify this part and make sure the user is well aware of the effect of such implementations.
This is a work in progress.
"""
from baconian.algo.dynamics.reward_func.reward_func import RewardFunc
import numpy as np
class PendulumRewardFunc(RewardFunc):
def __init__(self, name='pendulum_reward_func'):
super().__init__(name)
self.max_speed = 8
self.max_torque = 2.
self.dt = .05
def __call__(self, state, action, new_state, **kwargs) -> float:
th = state[0]
thdot = state[1]
u = np.clip(action, -self.max_torque, self.max_torque)[0]
costs = angle_normalize(th) ** 2 + .1 * thdot ** 2 + .001 * (u ** 2)
return float(-costs)
def init(self):
super().init()
def angle_normalize(x):
return ((x + np.pi) % (2 * np.pi)) - np.pi
REWARD_FUNC_DICT = {
'Pendulum-v0': PendulumRewardFunc
}
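# A sketch of adding a reward function for another environment: subclass RewardFunc,
# implement __call__(state, action, new_state), and register it in REWARD_FUNC_DICT.
# 'MountainCarContinuous-v0' and the reward expression below are illustrative assumptions,
# not the environment's official reward definition.
#
#   class MyCarRewardFunc(RewardFunc):
#       def __init__(self, name='my_car_reward_func'):
#           super().__init__(name)
#
#       def __call__(self, state, action, new_state, **kwargs) -> float:
#           # e.g. penalize control effort only (illustrative)
#           return float(-0.1 * np.sum(np.square(action)))
#
#   REWARD_FUNC_DICT['MountainCarContinuous-v0'] = MyCarRewardFunc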
| 1,274 | 27.333333 | 113 | py |
baconian-project | baconian-project-master/baconian/envs/dmcontrol_env.py | from baconian.core.core import Env, EnvSpec
have_mujoco_flag = True
try:
from dm_control import mujoco
from gym.envs.mujoco import mujoco_env
from dm_control import suite
from dm_control.rl.specs import ArraySpec
from dm_control.rl.specs import BoundedArraySpec
from collections import OrderedDict
except Exception:
have_mujoco_flag = False
import numpy as np
import types
from gym.spaces import *
import baconian.common.spaces as garage_space
def convert_dm_control_to_gym_space(dm_control_space):
r"""Convert dm_control space to gym space. """
if isinstance(dm_control_space, BoundedArraySpec):
space = Box(low=dm_control_space.minimum,
high=dm_control_space.maximum,
dtype=dm_control_space.dtype)
assert space.shape == dm_control_space.shape
return garage_space.Box(low=space.low, high=space.high)
elif isinstance(dm_control_space, ArraySpec) and not isinstance(dm_control_space, BoundedArraySpec):
space = Box(low=-float('inf'),
high=float('inf'),
shape=dm_control_space.shape,
dtype=dm_control_space.dtype)
return garage_space.Box(low=space.low, high=space.high)
elif isinstance(dm_control_space, OrderedDict):
space = Dict(OrderedDict([(key, convert_dm_control_to_gym_space(value))
for key, value in dm_control_space.items()]))
return garage_space.Dict(space.spaces)
else:
raise NotImplementedError
_env_inited_count = dict()
# def make(gym_env_id, allow_multiple_env=True):
# """
#
# :param gym_env_id:
# :param allow_multiple_env:
# :return:
# """
# if allow_multiple_env is True:
#
# if gym_env_id not in _env_inited_count:
# _env_inited_count[gym_env_id] = 0
# else:
# _env_inited_count[gym_env_id] += 1
#
# return GymEnv(gym_env_id, name='{}_{}'.format(gym_env_id, _env_inited_count[gym_env_id]))
# else:
# return GymEnv(gym_env_id)
class DMControlEnv(Env):
"""
DeepMind Control Suite environment wrapping module
"""
def __init__(self, dmcs_env_id: str, name: str = None):
"""
:param dmcs_env_id:
:param name:
"""
super().__init__(name=name if name else dmcs_env_id)
self.env_id = dmcs_env_id
self.timestep = {}
try:
self.env = suite.load(dmcs_env_id, name)
except ValueError:
raise ValueError('Env id: {} and task: {} is not supported currently'.format(dmcs_env_id, name))
self.metadata = {'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': int(np.round(1.0 / self.env.control_timestep()))}
self.action_space = convert_dm_control_to_gym_space(self.env.action_spec())
self.observation_space = convert_dm_control_to_gym_space(self.env.observation_spec())
if isinstance(self.action_space, garage_space.Box):
self.action_space.low = np.nan_to_num(self.action_space.low)
self.action_space.high = np.nan_to_num(self.action_space.high)
self.action_space.sample = types.MethodType(self._sample_with_nan, self.action_space)
if isinstance(self.observation_space, garage_space.Box):
self.observation_space.low = np.nan_to_num(self.observation_space.low)
self.observation_space.high = np.nan_to_num(self.observation_space.high)
self.observation_space.sample = types.MethodType(self._sample_with_nan, self.observation_space)
self.env_spec = EnvSpec(obs_space=self.observation_space,
action_space=self.action_space)
self.viewer = None
def step(self, action):
"""
:param action:
:return:
"""
super().step(action)
self.timestep = self.env.step(action)
observation = self.timestep.observation
reward = self.timestep.reward
done = self.timestep.last()
info = {}
return observation, reward, done, info
def reset(self):
"""
:return:
"""
super().reset()
return self.env.reset()
def init(self):
"""
:return:
"""
super().init()
return self.reset()
def seed(self, seed=None):
"""
:param seed:
:return:
"""
return self.env.task.random.seed(seed)
def get_state(self):
"""
:return:
"""
if self.timestep != {}:
return self.timestep.observation
else:
raise ValueError('Env id: {} does not have an observation yet.'.format(self.env_id))
@staticmethod
def _sample_with_nan(space: garage_space.Space):
"""
:param space:
:return:
"""
assert isinstance(space, garage_space.Box)
high = np.ones_like(space.low)
low = -1 * np.ones_like(space.high)
return np.clip(np.random.uniform(low=low, high=high, size=space.low.shape),
a_min=space.low,
a_max=space.high)
if __name__ == '__main__':
a = suite.load("cartpole", "swingup")
a = DMControlEnv("cartpole", "swingup")
| 5,332 | 30.370588 | 108 | py |
baconian-project | baconian-project-master/baconian/envs/gym_env.py | from baconian.core.core import Env, EnvSpec
import gym.envs
from gym.envs.registration import registry
# do not remove the following import statements
import pybullet
import pybullet_envs
have_mujoco_flag = True
try:
from gym.envs.mujoco import mujoco_env
except Exception:
have_mujoco_flag = False
import numpy as np
import types
import gym.spaces as GymSpace
import baconian.common.spaces as garage_space
import gym.error as gym_error
_env_inited_count = dict()
def make(gym_env_id: str, allow_multiple_env=True):
"""
:param gym_env_id: gym environment id
    :type gym_env_id: str
:param allow_multiple_env: allow multiple environments, by default True
:type allow_multiple_env: bool
:return: new gym environment
:rtype: GymEnv
"""
if allow_multiple_env is True:
if gym_env_id not in _env_inited_count:
_env_inited_count[gym_env_id] = 0
else:
_env_inited_count[gym_env_id] += 1
return GymEnv(gym_env_id, name='{}_{}'.format(gym_env_id, _env_inited_count[gym_env_id]))
else:
return GymEnv(gym_env_id)
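# A minimal usage sketch (using the standard 'Pendulum-v0' id as an example; any registered gym id works):
#   env_a = make('Pendulum-v0')                             # instance named 'Pendulum-v0_0'
#   env_b = make('Pendulum-v0')                             # instance named 'Pendulum-v0_1'
#   env_c = make('Pendulum-v0', allow_multiple_env=False)   # instance named 'Pendulum-v0'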
def space_converter(space: GymSpace.Space):
"""
Convert space into any one of "Box", "Discrete", or "Tuple" type.
:param space: space of gym environment
:type space: GymSpace
:return: converted space
:rtype: Box, Discrete, or Tuple
"""
if isinstance(space, GymSpace.Box):
return garage_space.Box(low=space.low, high=space.high)
elif isinstance(space, GymSpace.Dict):
return garage_space.Dict(space.spaces)
elif isinstance(space, GymSpace.Discrete):
return garage_space.Discrete(space.n)
elif isinstance(space, GymSpace.Tuple):
return garage_space.Tuple(list(map(space_converter, space.spaces)))
else:
raise NotImplementedError
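# For example, space_converter(GymSpace.Discrete(4)) returns a baconian Discrete(4), and a
# GymSpace.Tuple of sub-spaces is converted recursively into a baconian Tuple whose
# components are converted one by one.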
class GymEnv(Env):
"""
Gym environment wrapping module
"""
_all_gym_env_id = list(registry.env_specs.keys())
def __init__(self, gym_env_id: str, name: str = None):
"""
:param gym_env_id: gym environment id
:type gym_env_id: str
:param name: name of the gym environment instance
:type name: str
"""
super().__init__(name=name if name else gym_env_id)
self.env_id = gym_env_id
        try:
            self._gym_env = gym.make(gym_env_id)
        except gym_error.UnregisteredEnv:
            raise ValueError('Env id: {} is not supported currently'.format(gym_env_id))
self.action_space = space_converter(self._gym_env.action_space)
self.observation_space = space_converter(self._gym_env.observation_space)
if isinstance(self.action_space, garage_space.Box):
self.action_space.low = np.nan_to_num(self.action_space.low)
self.action_space.high = np.nan_to_num(self.action_space.high)
self.action_space.sample = types.MethodType(self._sample_with_nan, self.action_space)
if isinstance(self.observation_space, garage_space.Box):
self.observation_space.low = np.nan_to_num(self.observation_space.low)
self.observation_space.high = np.nan_to_num(self.observation_space.high)
self.observation_space.sample = types.MethodType(self._sample_with_nan, self.observation_space)
self.env_spec = EnvSpec(obs_space=self.observation_space,
action_space=self.action_space)
self.reward_range = self._gym_env.reward_range
def step(self, action):
"""
        :param action: action to be taken by the agent in the environment
        :type action: np.ndarray or int
        :return: tuple of (observation, reward, done, info) from the unwrapped environment
        :rtype: tuple
"""
super().step(action)
action = self.env_spec.flat_action(action)
state, re, done, info = self.unwrapped.step(action=action)
return state, re, bool(done), info
def reset(self):
"""
Reset the gym environment.
:return:
"""
super().reset()
return self.unwrapped.reset()
def init(self):
"""
Initialize the gym environment.
:return:
"""
super().init()
return self.reset()
def seed(self, seed=None):
"""
        :param seed: seed of random number generation
:type seed: int
:return: seed of the unwrapped environment
:rtype: int
"""
return super().seed(seed)
def get_state(self):
"""
:return:the state of unwrapped gym environment
:rtype: np.ndarray
"""
if (have_mujoco_flag is True and isinstance(self.unwrapped_gym, mujoco_env.MujocoEnv)) or (
hasattr(self.unwrapped_gym, '_get_obs') and callable(self.unwrapped_gym._get_obs)):
return self.unwrapped_gym._get_obs()
elif hasattr(self.unwrapped_gym, '_get_ob') and callable(self.unwrapped_gym._get_ob):
return self.unwrapped_gym._get_ob()
elif hasattr(self.unwrapped_gym, 'state'):
return self.unwrapped_gym.state if isinstance(self.unwrapped_gym.state, np.ndarray) else np.array(
self.unwrapped_gym.state)
elif hasattr(self.unwrapped_gym, 'observation'):
return self.unwrapped_gym.observation if isinstance(self.unwrapped_gym.observation,
np.ndarray) else np.array(
self.unwrapped_gym.state)
elif hasattr(self.unwrapped_gym, 'spec') and hasattr(self.unwrapped_gym.spec,
'id') and self.unwrapped_gym.spec.id in specialEnv:
return specialEnv[self.unwrapped_gym.spec.id](self)
elif hasattr(self.unwrapped_gym, 'robot'):
return self.unwrapped_gym.robot.calc_state()
else:
raise ValueError('Env id: {} is not supported for method get_state'.format(self.env_id))
@property
def unwrapped(self):
"""
:return: original unwrapped gym environment
:rtype: gym env
"""
return self._gym_env
@property
def unwrapped_gym(self):
"""
:return: gym environment, depend on attribute 'unwrapped'
:rtype: gym env
"""
if hasattr(self._gym_env, 'unwrapped'):
return self._gym_env.unwrapped
else:
return self._gym_env
@staticmethod
def _sample_with_nan(space: garage_space.Space):
"""
:param space: a 'Box'type space
:return: numpy clip of space that contains nan values
:rtype: np.ndarray
"""
assert isinstance(space, garage_space.Box)
high = np.ones_like(space.low)
low = -1 * np.ones_like(space.high)
return np.clip(np.random.uniform(low=low, high=high, size=space.low.shape),
a_min=space.low,
a_max=space.high)
def __str__(self):
return "<GymEnv instance> {}".format(self.env_id)
def get_lunarlander_state(env):
pos = env.unwrapped_gym.lander.position
vel = env.unwrapped_gym.lander.linearVelocity
fps = 50
scale = 30.0 # affects how fast-paced the game is, forces should be adjusted as well
leg_down = 18
viewport_w = 600
viewport_h = 400
state = [
(pos.x - viewport_w / scale / 2) / (viewport_w / scale / 2),
(pos.y - (env.unwrapped_gym.helipad_y + leg_down / scale)) / (viewport_h / scale / 2),
vel.x * (viewport_w / scale / 2) / fps,
vel.y * (viewport_h / scale / 2) / fps,
env.unwrapped_gym.lander.angle,
20.0 * env.unwrapped_gym.lander.angularVelocity / fps,
1.0 if env.unwrapped_gym.legs[0].ground_contact else 0.0,
1.0 if env.unwrapped_gym.legs[1].ground_contact else 0.0
]
return np.array(state, dtype=np.float32)
specialEnv = {
'LunarLander-v2': get_lunarlander_state
}
| 8,019 | 33.718615 | 112 | py |
baconian-project | baconian-project-master/baconian/envs/util.py | """
This script is from garage
"""
import gym.spaces
import numpy as np
from baconian.common import special
__all__ = [
'flat_dim', 'flatten', 'flatten_n', 'unflatten', 'unflatten_n',
'weighted_sample'
]
def flat_dim(space):
if isinstance(space, gym.spaces.Box):
return np.prod(space.low.shape)
elif isinstance(space, gym.spaces.Discrete):
return space.n
elif isinstance(space, gym.spaces.Tuple):
return np.sum([flat_dim(x) for x in space.spaces])
else:
raise NotImplementedError
def flatten(space, obs):
if isinstance(space, gym.spaces.Box):
return np.asarray(obs).flatten()
elif isinstance(space, gym.spaces.Discrete):
if space.n == 2:
obs = int(obs)
return special.to_onehot(obs, space.n)
elif isinstance(space, gym.spaces.Tuple):
return np.concatenate(
[flatten(c, xi) for c, xi in zip(space.spaces, obs)])
else:
raise NotImplementedError
def flatten_n(space, obs):
if isinstance(space, gym.spaces.Box):
obs = np.asarray(obs)
return obs.reshape((obs.shape[0], -1))
elif isinstance(space, gym.spaces.Discrete):
return special.to_onehot_n(obs, space.n)
elif isinstance(space, gym.spaces.Tuple):
        obs_regrouped = [[o[i] for o in obs] for i in range(len(obs[0]))]
flat_regrouped = [
flatten_n(c, oi) for c, oi in zip(space.spaces, obs_regrouped)
]
return np.concatenate(flat_regrouped, axis=-1)
else:
raise NotImplementedError
def unflatten(space, obs):
if isinstance(space, gym.spaces.Box):
return np.asarray(obs).reshape(space.shape)
elif isinstance(space, gym.spaces.Discrete):
return special.from_onehot(obs)
elif isinstance(space, gym.spaces.Tuple):
dims = [flat_dim(c) for c in space.spaces]
flat_xs = np.split(obs, np.cumsum(dims)[:-1])
return tuple(unflatten(c, xi) for c, xi in zip(space.spaces, flat_xs))
else:
raise NotImplementedError
def unflatten_n(space, obs):
if isinstance(space, gym.spaces.Box):
obs = np.asarray(obs)
return obs.reshape((obs.shape[0],) + space.shape)
elif isinstance(space, gym.spaces.Discrete):
return special.from_onehot_n(obs)
elif isinstance(space, gym.spaces.Tuple):
dims = [flat_dim(c) for c in space.spaces]
flat_xs = np.split(obs, np.cumsum(dims)[:-1], axis=-1)
unflat_xs = [
unflatten_n(c, xi) for c, xi in zip(space.spaces, flat_xs)
]
unflat_xs_grouped = list(zip(*unflat_xs))
return unflat_xs_grouped
else:
raise NotImplementedError
def weighted_sample(space, weights):
if isinstance(space, gym.spaces.Discrete):
return special.weighted_sample(weights, range(space.n))
else:
raise NotImplementedError
| 2,885 | 30.714286 | 78 | py |
baconian-project | baconian-project-master/baconian/envs/__init__.py | import os
import platform
from pathlib import Path
_PLATFORM = platform.system()
try:
_PLATFORM_SUFFIX = {
"Linux": "linux",
"Darwin": "macos",
"Windows": "win64"
}[_PLATFORM]
except KeyError:
raise OSError("Unsupported platform: {}".format(_PLATFORM))
# TODO potential bug here if mujoco py change default path in the future
os.environ['LD_LIBRARY_PATH'] = os.environ.get('LD_LIBRARY_PATH', '') \
+ ':' + str(Path.home()) + '/.mujoco/mujoco200/bin'
if os.environ.get('MUJOCO_PY_MUJOCO_PATH', '') == '':
os.environ['MUJOCO_PY_MUJOCO_PATH'] = str(Path.home()) + '/.mujoco/mujoco200'
if os.environ.get('MUJOCO_PY_MJKEY_PATH', '') == '':
os.environ['MUJOCO_PY_MJKEY_PATH'] = str(Path.home()) + '/.mujoco/mujoco200'
os.environ['MJKEY_PATH'] = os.environ.get('MUJOCO_PY_MJKEY_PATH', '')
os.environ['MJLIB_PATH'] = os.environ.get('MUJOCO_PY_MUJOCO_PATH', '')
# TODO disable the rendering temporarily
os.environ['DISABLE_MUJOCO_RENDERING'] = '1'
have_mujoco_flag = True
try:
from dm_control import mujoco
from gym.envs.mujoco import mujoco_env
from dm_control import suite
from dm_control.rl.specs import ArraySpec
from dm_control.rl.specs import BoundedArraySpec
from collections import OrderedDict
except Exception:
have_mujoco_flag = False
| 1,353 | 29.088889 | 83 | py |
baconian-project | baconian-project-master/baconian/envs/env_wrapper.py | from baconian.core.core import Env
from baconian.common.spaces import Box
from baconian.envs.gym_env import GymEnv
import numpy as np
class Wrapper(Env):
def __init__(self, env: Env):
if isinstance(env, GymEnv):
self.env = env.unwrapped_gym
self.src_env = env
else:
self.env = env
self.src_env = env
super().__init__(name=env.name + '_wrapper', copy_from_env=env)
def __getattr__(self, item):
if hasattr(self.src_env, item):
return getattr(self.src_env, item)
if hasattr(self.env, item):
return getattr(self.env, item)
raise AttributeError()
def seed(self, seed=None):
return self.src_env.seed(seed)
@property
def unwrapped(self):
return self.env.unwrapped
@property
def spec(self):
return self.env.spec
@classmethod
def class_name(cls):
return cls.__name__
def __str__(self):
return '<{}{}>'.format(type(self).__name__, self.env)
def __repr__(self):
return str(self)
def reset(self):
return self.src_env.reset()
def get_state(self):
return self.src_env.get_state()
class ObservationWrapper(Wrapper):
def reset(self):
observation = self.src_env.reset()
return self._observation(observation)
def step(self, action):
observation, reward, done, info = self.src_env.step(action)
return self.observation(observation), reward, done, info
def observation(self, observation):
return self._observation(observation)
def _observation(self, observation):
raise NotImplementedError
def get_state(self):
return self._observation(self.src_env.get_state())
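# A minimal subclass sketch (hypothetical wrapper, not part of the library): only the
# _observation hook needs to be provided; reset/step/get_state then pass every raw
# observation through it automatically.
#   class ScaledObsWrapper(ObservationWrapper):
#       def _observation(self, observation):
#           return np.asarray(observation) / 255.0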
class RewardWrapper(Wrapper):
def step(self, action):
observation, reward, done, info = self.src_env.step(action)
return observation, self.reward(observation, action, reward, done, info), done, info
def reward(self, observation, action, reward, done, info):
return self._reward(observation, action, reward, done, info)
def _reward(self, observation, action, reward, done, info):
raise NotImplementedError
class ActionWrapper(Wrapper):
def step(self, action):
action = self.action(action)
return self.src_env.step(action)
def action(self, action):
return self._action(action)
def _action(self, action):
raise NotImplementedError
def reverse_action(self, action):
return self._reverse_action(action)
def _reverse_action(self, action):
raise NotImplementedError
class StepObservationWrapper(ObservationWrapper):
def __init__(self, env: Env, step_limit=100000):
super().__init__(env=env)
assert isinstance(self.src_env.observation_space, Box), 'not support non Box space for step observation wrapper'
self.src_env.observation_space = Box(low=np.concatenate([self.src_env.observation_space.low, np.array([0])]),
high=np.concatenate(
[self.src_env.observation_space.high, np.array([step_limit])]))
self.src_env.env_spec.obs_space = self.src_env.observation_space
self.observation_space = self.src_env.observation_space
def _observation(self, observation):
obs = np.array(observation)
return np.concatenate([obs, np.array([self.src_env.trajectory_level_step_count])])
| 3,515 | 29.573913 | 120 | py |
baconian-project | baconian-project-master/baconian/envs/envs_done_func.py | """
Terminal/done signal functions are a tricky part of many model-based methods,
since the learned model itself requires a terminal (and reward) function to be supplied from outside.
Many codebases silently redefine the terminal function so that it differs from the original one,
which makes it difficult for users to tune an algorithm separately from the effects
introduced by such reward/terminal functions.
In Baconian, we try to clarify this part and make sure the user is well aware of the effect of such implementations.
This is a work in progress.
"""
from baconian.algo.dynamics.terminal_func.terminal_func import TerminalFunc, FixedEpisodeLengthTerminalFunc, \
RandomTerminalFunc
| 661 | 43.133333 | 113 | py |
baconian-project | baconian-project-master/baconian/config/global_config.py | """
The script to store some global configuration
"""
from typeguard import typechecked
import json_tricks as json
import tensorflow as tf
import os
from baconian.config.required_keys import SRC_UTIL_REQUIRED_KEYS
from baconian.common.error import *
class _SingletonDefaultGlobalConfig(object):
DEFAULT_MAX_TF_SAVER_KEEP = 5
DEFAULT_ALLOWED_EXCEPTION_OR_ERROR_LIST = (tf.errors.ResourceExhaustedError,)
DEFAULT_BASIC_STATUS_LIST = ('TRAIN', 'TEST')
DEFAULT_BASIC_INIT_STATUS = None
# config required key list
DEFAULT_DQN_REQUIRED_KEY_LIST = os.path.join(SRC_UTIL_REQUIRED_KEYS, 'dqn.json')
DEFAULT_CONSTANT_ACTION_POLICY_REQUIRED_KEY_LIST = os.path.join(SRC_UTIL_REQUIRED_KEYS,
'constant_action_policy.json')
DEFAULT_ALGO_DYNA_REQUIRED_KEY_LIST = os.path.join(SRC_UTIL_REQUIRED_KEYS,
'dyna.json')
DEFAULT_ALGO_ME_REQUIRED_KEY_LIST = os.path.join(SRC_UTIL_REQUIRED_KEYS,
'me.json')
DEFAULT_MODEL_FREE_PIPELINE_REQUIRED_KEY_LIST = os.path.join(SRC_UTIL_REQUIRED_KEYS,
'model_free_pipeline.json')
DEFAULT_MODEL_BASED_PIPELINE_REQUIRED_KEY_LIST = os.path.join(SRC_UTIL_REQUIRED_KEYS,
'model_based_pipeline.json')
DEFAULT_MPC_REQUIRED_KEY_LIST = os.path.join(SRC_UTIL_REQUIRED_KEYS,
'mpc.json')
DEFAULT_DDPG_REQUIRED_KEY_LIST = os.path.join(SRC_UTIL_REQUIRED_KEYS,
'ddpg.json')
DEFAULT_MADDPG_REQUIRED_KEY_LIST = os.path.join(SRC_UTIL_REQUIRED_KEYS,
'maddpg.json')
DEFAULT_PPO_REQUIRED_KEY_LIST = os.path.join(SRC_UTIL_REQUIRED_KEYS, 'ppo.json')
DEFAULT_AGENT_REQUIRED_KEY_LIST = os.path.join(SRC_UTIL_REQUIRED_KEYS, 'agent.json')
DEFAULT_EXPERIMENT_REQUIRED_KEY_LIST = os.path.join(SRC_UTIL_REQUIRED_KEYS, 'experiment.json')
# LOGGING CONFIG
DEFAULT_ALLOWED_LOG_FILE_TYPES = ('json', 'csv', 'h5py')
DEFAULT_LOG_LEVEL = 'DEBUG'
from baconian import ROOT_PATH
DEFAULT_LOG_PATH = os.path.join(ROOT_PATH, 'test/tests/tmp_path')
DEFAULT_MODEL_CHECKPOINT_PATH = os.path.join(DEFAULT_LOG_PATH, 'model_checkpoints')
DEFAULT_LOG_CONFIG_DICT = dict()
DEFAULT_LOG_USE_GLOBAL_MEMO_FLAG = True
DEFAULT_LOGGING_FORMAT = '%(levelname)s:%(asctime)-15s: %(message)s'
DEFAULT_WRITE_CONSOLE_LOG_TO_FILE_FLAG = True
DEFAULT_CONSOLE_LOG_FILE_NAME = 'console.log'
DEFAULT_CONSOLE_LOGGER_NAME = 'console_logger'
DEFAULT_EXPERIMENT_END_POINT = dict(TOTAL_AGENT_TRAIN_SAMPLE_COUNT=500,
TOTAL_AGENT_TEST_SAMPLE_COUNT=None,
TOTAL_AGENT_UPDATE_COUNT=None)
DEFAULT_TURN_OFF_GLOBAL_NAME_FLAG = False
# For internal use
SAMPLE_TYPE_SAMPLE_TRANSITION_DATA = 'transition_data'
SAMPLE_TYPE_SAMPLE_TRAJECTORY_DATA = 'trajectory_data'
def __init__(self):
super().__setattr__('freeze_flag', False)
def freeze(self):
super().__setattr__('freeze_flag', True)
def unfreeze(self):
super().__setattr__('freeze_flag', False)
@typechecked
def set_new_config(self, config_dict: dict):
for key, val in config_dict.items():
if hasattr(self, key):
attr = getattr(self, key)
if attr is not None and not isinstance(val, type(attr)):
raise TypeError('Set the GlobalConfig.{} with type{}, instead of type {}'.format(key,
type(
attr).__name__,
type(
val).__name__))
setattr(self, key, val)
else:
setattr(self, key, val)
@typechecked
def set_new_config_by_file(self, path_to_file: str):
with open(path_to_file, 'r') as f:
new_dict = json.load(f)
self.set_new_config(new_dict)
@typechecked
def set(self, key: str, val):
if self.freeze_flag is True:
raise AttemptToChangeFreezeGlobalConfigError()
if hasattr(self, key):
attr = getattr(self, key)
if attr is not None and not isinstance(val, type(attr)):
raise TypeError('Set the GlobalConfig.{} with type{}, instead of type {}'.format(key,
type(
attr).__name__,
type(
val).__name__))
setattr(self, key, val)
if key == 'DEFAULT_LOG_PATH':
self.set('DEFAULT_MODEL_CHECKPOINT_PATH', os.path.join(val, 'model_checkpoints'))
else:
setattr(self, key, val)
def return_all_as_dict(self):
return_dict = {}
for key in dir(self):
if key.isupper() is True or 'DEFAULT' in key:
attr = getattr(self, key)
try:
json.dumps(dict(key=attr))
except TypeError as e:
attr = 'cannot be json dumped'
return_dict[key] = attr
return return_dict
def __setattr__(self, key, value):
if self.freeze_flag is True:
raise AttemptToChangeFreezeGlobalConfigError("{} {}".format(key, value))
else:
super().__setattr__(key, value)
class GlobalConfig(object):
only_instance = None
def __new__(cls, *args, **kwargs):
if GlobalConfig.only_instance is None:
GlobalConfig.only_instance = _SingletonDefaultGlobalConfig()
return GlobalConfig.only_instance
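# Usage sketch: GlobalConfig() always returns the same underlying singleton, so a value
# set once is visible everywhere, e.g.
#   GlobalConfig().set('DEFAULT_LOG_PATH', '/tmp/baconian_log')
#   GlobalConfig().DEFAULT_LOG_PATH   # -> '/tmp/baconian_log'
# Note that set() raises AttemptToChangeFreezeGlobalConfigError once freeze() has been called.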
| 6,471 | 43.328767 | 120 | py |
baconian-project | baconian-project-master/baconian/config/__init__.py | 0 | 0 | 0 | py |
|
baconian-project | baconian-project-master/baconian/config/dict_config.py | import os
from baconian.core.util import init_func_arg_record_decorator
import baconian.common.files as files
class Config(object):
pass
class DictConfig(Config):
@init_func_arg_record_decorator()
def __init__(self, required_key_dict: dict, config_dict: dict = None, cls_name=""):
self.cls_name = cls_name
self.required_key_dict = required_key_dict
if config_dict:
self._config_dict = config_dict
else:
self._config_dict = {}
@property
def config_dict(self):
return self._config_dict
@config_dict.setter
def config_dict(self, new_value):
if self.check_config(dict=new_value, key_dict=self.required_key_dict) is True:
for key, val in new_value.items():
if type(val) is list:
new_value[str(key)] = tuple(val)
self._config_dict = new_value
for key, val in self._config_dict.items():
setattr(self, key, val)
def save_config(self, path, name):
DictConfig.save_to_json(dict=self.config_dict, path=path, file_name=name)
def load_config(self, path):
res = DictConfig.load_json(file_path=path)
self.config_dict = res
def check_config(self, dict: dict, key_dict: dict) -> bool:
if self.check_dict_key(check_dict=dict, required_key_dict=key_dict):
return True
else:
return False
@staticmethod
def load_json(file_path):
return files.load_json(file_path)
@staticmethod
def save_to_json(dict, path, file_name=None):
if file_name is not None:
path = os.path.join(path, file_name)
files.save_to_json(dict, path=path, file_name=file_name)
def check_dict_key(self, check_dict: dict, required_key_dict: dict) -> bool:
for key, val in required_key_dict.items():
if not isinstance(check_dict, dict):
raise TypeError('{}: input check dict should be a dict instead of {}'.format(self.cls_name,
type(check_dict).__name__))
if key not in check_dict:
raise IndexError('{} Missing Key {}'.format(self.cls_name, key))
if required_key_dict[key] is not None and not isinstance(check_dict[key], type(required_key_dict[key])):
raise TypeError('{} should be type {} from required key dict file but with type {}'.
format(key, type(required_key_dict[key]), type(check_dict[key])))
if isinstance(val, dict):
self.check_dict_key(check_dict=check_dict[key], required_key_dict=required_key_dict[key])
return True
def __call__(self, key):
if key not in self.config_dict:
raise KeyError('{} key {} not in the config'.format(self.cls_name, key))
else:
return self.config_dict[key]
def __getitem__(self, item):
return self.__call__(item)
def set(self, key, val):
self.config_dict[key] = val | 3,105 | 36.421687 | 120 | py |
baconian-project | baconian-project-master/baconian/config/required_keys/__init__.py | import os
SRC_UTIL_REQUIRED_KEYS = os.path.dirname(os.path.realpath(__file__))
| 80 | 19.25 | 68 | py |
baconian-project | baconian-project-master/baconian/common/error.py | class BaconianError(Exception):
pass
class GlobalNameExistedError(BaconianError):
pass
class StatusInfoNotRegisteredError(BaconianError):
pass
class StateOrActionOutOfBoundError(BaconianError):
pass
class MemoryBufferLessThanBatchSizeError(BaconianError):
pass
class InappropriateParameterSetting(BaconianError):
pass
class DuplicatedRegisteredError(BaconianError):
pass
class LogPathOrFileNotExistedError(BaconianError):
pass
class NotCatchCorrectExceptionError(BaconianError):
pass
class AttemptToChangeFreezeGlobalConfigError(BaconianError):
pass
class MissedConfigError(BaconianError):
pass
class TransformationResultedToDifferentShapeError(BaconianError):
pass
class WrongValueRangeError(BaconianError):
pass
class ShapeNotCompatibleError(BaconianError):
pass
class EnvNotExistedError(BaconianError):
pass
class LogItemNotExisted(BaconianError):
pass
| 950 | 14.095238 | 65 | py |
baconian-project | baconian-project-master/baconian/common/special.py | """
This script is from garage
"""
import gym.spaces
import numpy as np
import scipy
import scipy.signal
from typeguard import typechecked
import baconian.common.spaces as mbrl_spaces
def weighted_sample(weights, objects):
"""
Return a random item from objects, with the weighting defined by weights
(which must sum to 1).
"""
# An array of the weights, cumulatively summed.
cs = np.cumsum(weights)
# Find the index of the first weight over a random value.
idx = sum(cs < np.random.rand())
return objects[min(idx, len(objects) - 1)]
def weighted_sample_n(prob_matrix, items):
s = prob_matrix.cumsum(axis=1)
r = np.random.rand(prob_matrix.shape[0])
k = (s < r.reshape((-1, 1))).sum(axis=1)
n_items = len(items)
return items[np.minimum(k, n_items - 1)]
# compute softmax for each row
def softmax(x):
shifted = x - np.max(x, axis=-1, keepdims=True)
expx = np.exp(shifted)
return expx / np.sum(expx, axis=-1, keepdims=True)
# compute entropy for each row
def cat_entropy(x):
return -np.sum(x * np.log(x), axis=-1)
# compute perplexity for each row
def cat_perplexity(x):
return np.exp(cat_entropy(x))
def explained_variance_1d(ypred, y):
assert y.ndim == 1 and ypred.ndim == 1
vary = np.var(y)
if np.isclose(vary, 0):
if np.var(ypred) > 0:
return 0
else:
return 1
return 1 - np.var(y - ypred) / (vary + 1e-8)
def to_onehot(ind, dim):
ret = np.zeros(dim)
ret[ind] = 1
return ret
def to_onehot_n(inds, dim):
ret = np.zeros((len(inds), dim))
ret[np.arange(len(inds)), inds] = 1
return ret
def from_onehot(v):
return np.nonzero(v)[0][0]
def from_onehot_n(v):
if ((isinstance(v, np.ndarray) and not v.size)
or (isinstance(v, list) and not v)):
return []
return np.nonzero(v)[1]
def discount_cumsum(x, discount):
# See https://docs.scipy.org/doc/scipy/reference/tutorial/signal.html#difference-equation-filtering # noqa: E501
# Here, we have y[t] - discount*y[t+1] = x[t]
# or rev(y)[t] - discount*rev(y)[t-1] = rev(x)[t]
return scipy.signal.lfilter(
[1], [1, float(-discount)], x[::-1], axis=0)[::-1]
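# Worked example: discount_cumsum(np.array([1., 1., 1.]), 0.9) -> [2.71, 1.9, 1.0],
# i.e. each entry is the discounted sum of the values from that step onwards.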
def discount_return(x, discount):
return np.sum(x * (discount ** np.arange(len(x))))
def rk4(derivs, y0, t, *args, **kwargs):
"""
Integrate 1D or ND system of ODEs using 4-th order Runge-Kutta.
This is a toy implementation which may be useful if you find
yourself stranded on a system w/o scipy. Otherwise use
:func:`scipy.integrate`.
*y0*
initial state vector
*t*
sample times
*derivs*
returns the derivative of the system and has the
signature ``dy = derivs(yi, ti)``
*args*
additional arguments passed to the derivative function
*kwargs*
additional keyword arguments passed to the derivative function
Example 1 ::
## 2D system
def derivs6(x,t):
d1 = x[0] + 2*x[1]
d2 = -3*x[0] + 4*x[1]
return (d1, d2)
dt = 0.0005
t = arange(0.0, 2.0, dt)
y0 = (1,2)
yout = rk4(derivs6, y0, t)
Example 2::
## 1D system
alpha = 2
def derivs(x,t):
return -alpha*x + exp(-t)
y0 = 1
yout = rk4(derivs, y0, t)
If you have access to scipy, you should probably be using the
scipy.integrate tools rather than this function.
"""
try:
ny = len(y0)
except TypeError:
yout = np.zeros((len(t),), np.float_)
else:
yout = np.zeros((len(t), ny), np.float_)
yout[0] = y0
i = 0
for i in np.arange(len(t) - 1):
thist = t[i]
dt = t[i + 1] - thist
dt2 = dt / 2.0
y0 = yout[i]
k1 = np.asarray(derivs(y0, thist, *args, **kwargs))
k2 = np.asarray(derivs(y0 + dt2 * k1, thist + dt2, *args, **kwargs))
k3 = np.asarray(derivs(y0 + dt2 * k2, thist + dt2, *args, **kwargs))
k4 = np.asarray(derivs(y0 + dt * k3, thist + dt, *args, **kwargs))
yout[i + 1] = y0 + dt / 6.0 * (k1 + 2 * k2 + 2 * k3 + k4)
return yout
def make_batch(v, original_shape: (list, tuple)):
if not isinstance(v, np.ndarray):
v = np.array(v)
# assert len(v.shape) <= len(original_shape) + 1
    if len(v.shape) == len(original_shape) + 1 and np.equal(np.array(v.shape[1:]),
                                                            np.array(original_shape)).all():
return v
else:
bs = np.prod(list(v.shape)) / np.prod(original_shape)
assert float(bs).is_integer()
return np.reshape(v, newshape=[int(bs)] + list(original_shape))
def flat_dim(space):
if isinstance(space, mbrl_spaces.Box):
return np.prod(space.low.shape)
elif isinstance(space, mbrl_spaces.Discrete):
return space.n
elif isinstance(space, mbrl_spaces.Tuple):
return np.sum([flat_dim(x) for x in space.spaces])
else:
raise NotImplementedError
def flatten(space, obs, one_hot_for_discrete=False):
if isinstance(space, mbrl_spaces.Box):
return np.asarray(obs).flatten()
elif isinstance(space, mbrl_spaces.Discrete):
if one_hot_for_discrete is True:
if space.n == 2:
obs = int(obs)
return to_onehot(obs, space.n)
else:
return int(obs)
elif isinstance(space, mbrl_spaces.Tuple):
return np.concatenate(
[flatten(c, xi) for c, xi in zip(space.spaces, obs)])
else:
raise NotImplementedError
def flatten_n(space, obs):
if isinstance(space, mbrl_spaces.Box):
obs = np.asarray(obs)
return obs.reshape((obs.shape[0], -1))
elif isinstance(space, mbrl_spaces.Discrete):
return to_onehot_n(np.array(obs, dtype=np.int), space.n)
elif isinstance(space, mbrl_spaces.Tuple):
        obs_regrouped = [[o[i] for o in obs] for i in range(len(obs[0]))]
flat_regrouped = [
flatten_n(c, oi) for c, oi in zip(space.spaces, obs_regrouped)
]
return np.concatenate(flat_regrouped, axis=-1)
else:
raise NotImplementedError
def unflatten(space, obs):
if isinstance(space, mbrl_spaces.Box):
return np.asarray(obs).reshape(space.shape)
elif isinstance(space, mbrl_spaces.Discrete):
return from_onehot(np.array(obs, dtype=np.int))
elif isinstance(space, mbrl_spaces.Tuple):
dims = [flat_dim(c) for c in space.spaces]
flat_xs = np.split(obs, np.cumsum(dims)[:-1])
return tuple(unflatten(c, xi) for c, xi in zip(space.spaces, flat_xs))
else:
raise NotImplementedError
def unflatten_n(space, obs):
if isinstance(space, mbrl_spaces.Box):
obs = np.asarray(obs)
return obs.reshape((obs.shape[0],) + space.shape)
elif isinstance(space, mbrl_spaces.Discrete):
return from_onehot_n(np.array(obs, dtype=np.int))
elif isinstance(space, mbrl_spaces.Tuple):
dims = [flat_dim(c) for c in space.spaces]
flat_xs = np.split(obs, np.cumsum(dims)[:-1], axis=-1)
unflat_xs = [
unflatten_n(c, xi) for c, xi in zip(space.spaces, flat_xs)
]
unflat_xs_grouped = list(zip(*unflat_xs))
return unflat_xs_grouped
else:
raise NotImplementedError
| 7,397 | 27.898438 | 117 | py |
baconian-project | baconian-project-master/baconian/common/schedules.py | """This file is used for specifying various schedules that evolve over
time throughout the execution of the algorithm, such as:
- learning rate for the optimizer
- exploration epsilon for the epsilon greedy exploration strategy
 - beta parameter for prioritized replay
Each schedule has a function `value(t)` which returns the current value
of the parameter given the timestep t of the optimization procedure.
"""
from typeguard import typechecked
from baconian.common.error import *
from baconian.common.logging import ConsoleLogger
class Scheduler(object):
def __init__(self):
self.final_p = None
self.initial_p = None
def value(self):
"""Value of the schedule at time t"""
raise NotImplementedError()
class ConstantScheduler(Scheduler):
def __init__(self, value):
"""Value remains constant over time.
Parameters
----------
value: float
Constant value of the schedule
"""
self._v = value
Scheduler.__init__(self)
def value(self):
"""See Schedule.value"""
return self._v
def linear_interpolation(l, r, alpha):
return l + alpha * (r - l)
class PiecewiseScheduler(Scheduler):
def __init__(self, endpoints, t_fn, interpolation=linear_interpolation, outside_value=None):
"""Piecewise schedule.
endpoints: [(int, int)]
            list of pairs `(time, value)` meaning that schedule should output
`value` when `t==time`. All the values for time must be sorted in
an increasing order. When t is between two times, e.g. `(time_a, value_a)`
and `(time_b, value_b)`, such that `time_a <= t < time_b` then value outputs
`interpolation(value_a, value_b, alpha)` where alpha is a fraction of
time passed between `time_a` and `time_b` for time `t`.
interpolation: lambda float, float, float: float
a function that takes value to the left and to the right of t according
to the `endpoints`. Alpha is the fraction of distance from left endpoint to
right endpoint that t has covered. See linear_interpolation for example.
outside_value: float
            if the value is requested outside of all the intervals specified in
`endpoints` this value is returned. If None then AssertionError is
raised when outside value is requested.
"""
idxes = [e[0] for e in endpoints]
assert idxes == sorted(idxes)
Scheduler.__init__(self)
self._interpolation = interpolation
self._outside_value = outside_value
self._endpoints = endpoints
self.t_fn = t_fn
assert callable(self.t_fn)
def value(self):
"""See Schedule.value"""
t = wrap_t_fn(self.t_fn)
for (l_t, l), (r_t, r) in zip(self._endpoints[:-1], self._endpoints[1:]):
if l_t <= t and t < r_t:
alpha = float(t - l_t) / (r_t - l_t)
return self._interpolation(l, r, alpha)
# t does not belong to any of the pieces, so doom.
assert self._outside_value is not None
return self._outside_value
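# A minimal sketch (hypothetical endpoints; `get_step` stands for any zero-argument
# callable returning the current step count):
#   sched = PiecewiseScheduler(endpoints=[(0, 1.0), (100, 0.1), (200, 0.01)],
#                              t_fn=get_step, outside_value=0.01)
#   value() -> 0.55  at t = 50   (halfway between 1.0 and 0.1)
#   value() -> 0.055 at t = 150  (halfway between 0.1 and 0.01)
#   value() -> 0.01  at t = 300  (outside every piece, so outside_value is returned)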
class LinearScheduler(Scheduler):
@typechecked
def __init__(self, t_fn, schedule_timesteps: int, final_p: float, initial_p=1.0):
"""Linear interpolation between initial_p and final_p over
schedule_timesteps. After this many timesteps pass final_p is
returned.
Parameters
----------
schedule_timesteps: int
Number of timesteps for which to linearly anneal initial_p
to final_p
initial_p: float
initial output value
final_p: float
final output value
"""
Scheduler.__init__(self)
self.schedule_timesteps = schedule_timesteps
self.final_p = final_p
self.initial_p = initial_p
self.t_fn = t_fn
if not callable(self.t_fn):
raise TypeError("t_fn {} is not callable".format(self.t_fn))
def value(self):
        """See Schedule.value"""
        t = wrap_t_fn(self.t_fn)
fraction = min(float(t) / self.schedule_timesteps, 1.0)
return self.initial_p + fraction * (self.final_p - self.initial_p)
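# A minimal sketch: anneal an exploration epsilon from 1.0 to 0.05 over 10000 steps,
# where `get_step` is again any zero-argument callable returning the current step count:
#   eps = LinearScheduler(t_fn=get_step, schedule_timesteps=10000,
#                         final_p=0.05, initial_p=1.0)
#   eps.value() -> 0.525 at step 5000, and stays at 0.05 from step 10000 onwards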
class EventScheduler(Scheduler):
def value(self) -> bool:
return False
class PeriodicalEventSchedule(EventScheduler):
"""
Trigger an event with certain scheduled period
"""
def __init__(self, t_fn, trigger_every_step, after_t=0):
super().__init__()
self.t_fn = t_fn
self.trigger_every_step = trigger_every_step
self.after_t = after_t
self.last_saved_t_value = -1
def value(self) -> bool:
"""
return a boolean, true for trigger this event, false for not.
:return:
"""
t = wrap_t_fn(self.t_fn)
if t < self.after_t:
return False
else:
if t - self.last_saved_t_value >= self.trigger_every_step:
self.last_saved_t_value = t
return True
else:
return False
def wrap_t_fn(t_fn):
try:
return t_fn()
except StatusInfoNotRegisteredError:
ConsoleLogger().print('error', 'StatusInfoNotRegisteredError occurred, return with 0')
return 0
| 5,382 | 32.64375 | 96 | py |
baconian-project | baconian-project-master/baconian/common/log_data_loader.py | from baconian.common.plotter import Plotter
import pandas as pd
from baconian.common.files import *
from collections import OrderedDict
from typing import Union
class SingleExpLogDataLoader(object):
def __init__(self, exp_root_dir: str):
self._root_dir = exp_root_dir
check_file(path=os.path.join(exp_root_dir, 'record', 'final_status.json')) # dir list
check_file(path=os.path.join(exp_root_dir, 'record', 'global_config.json'))
self.final_status = load_json(file_path=os.path.join(exp_root_dir, 'record', 'final_status.json'))
self.global_config = load_json(file_path=os.path.join(exp_root_dir, 'record', 'global_config.json'))
def load_record_data(self, agent_log_dir_name, algo_log_dir_name, env_log_dir_name):
# TODO pre-load all data here
check_dir(os.path.join(self._root_dir, agent_log_dir_name))
check_dir(os.path.join(self._root_dir, algo_log_dir_name))
def init(self):
pass
def plot_res(self, sub_log_dir_name, key, index, save_path=None, mode=('line', 'hist', 'scatter'),
average_over=1, file_name=None, save_format='png', save_flag=False,
):
log_name = os.path.join(self._root_dir, 'record', sub_log_dir_name, 'log.json')
f = open(log_name, 'r')
res_dict = json.load(f)
key_list = res_dict[key]
key_value = OrderedDict()
key_vector = []
index_vector = []
for record in key_list:
num_index = int(record[index])
index_vector.append(num_index)
key_vector.append(record["value"])
key_value[index] = index_vector
key_value[key] = key_vector
data = pd.DataFrame.from_dict(key_value) # Create dataframe for plotting
row_num = data.shape[0]
column_num = data.shape[1]
data_new = data
# Calculate mean value in horizontal axis, incompatible with histogram mode
if average_over != 1:
if mode != 'histogram':
new_row_num = int(row_num / average_over)
data_new = data.head(new_row_num).copy()
data_new.loc[:, index] = data_new.loc[:, index] * average_over
for column in range(1, column_num):
for i in range(new_row_num):
data_new.iloc[i, column] = data.iloc[i * average_over: i * average_over + average_over,
column].mean()
if mode == 'histogram':
histogram_flag = True
data_new = data.iloc[:, 1:].copy()
else:
histogram_flag = False
scatter_flag = True if mode == 'scatter' else False
Plotter.plot_any_key_in_log(data=data_new, index=index, key=key,
sub_log_dir_name=sub_log_dir_name,
scatter_flag=scatter_flag, save_flag=save_flag,
histogram_flag=histogram_flag, save_path=save_path,
save_format=save_format, file_name=file_name)
# TODO
# key and index, log given = plot figure(curve), average or 10, or set a range
# normalisation
class MultipleExpLogDataLoader(object):
def __init__(self, exp_root_dir_list: Union[str, list]):
self._root_dir = exp_root_dir_list
self.exp_list = []
self.num = 0
if type(self._root_dir) is str:
for path in os.listdir(self._root_dir):
print('path: ', path)
exp_root_dir = os.path.join(self._root_dir, path)
print(exp_root_dir)
self.exp_list.append(exp_root_dir)
self.num += 1
SingleExpLogDataLoader(exp_root_dir)
else:
for dependent_exp in self._root_dir:
assert type(dependent_exp) is str
for path in os.listdir(dependent_exp):
exp_root_dir = os.path.join(dependent_exp, path)
self.exp_list.append(exp_root_dir)
self.num += 1
SingleExpLogDataLoader(exp_root_dir)
def plot_res(self, key, index, save_path, sub_log_dir_name: str, mode=('plot', 'hist', 'scatter'), average_over=1,
save_format='png', file_name=None, save_flag=False):
multiple_key_value = {}
for exp in self.exp_list:
f = open(os.path.join(exp, 'record', sub_log_dir_name, 'log.json'), 'r')
res_dict = json.load(f)
key_list = res_dict[key]
key_vector = []
index_vector = []
for record in key_list:
num_index = int(record[index])
index_vector.append(num_index)
key_vector.append(record["value"])
multiple_key_value[index] = index_vector
multiple_key_value[key + '_' + exp] = key_vector
data = pd.DataFrame.from_dict(multiple_key_value) # Create dataframe for plotting
row_num = data.shape[0]
column_num = data.shape[1]
data_new = data
# Calculate mean value in horizontal axis, incompatible with histogram mode
if average_over != 1:
if mode != 'histogram':
new_row_num = int(row_num / average_over)
data_new = data.head(new_row_num).copy()
data_new.loc[:, index] = data_new.loc[:, index] * average_over
for column in range(1, column_num):
for i in range(new_row_num):
data_new.iloc[i, column] = data.iloc[i * average_over: i * average_over + average_over,
column].mean()
data_new['MEAN'] = data_new[data_new.columns[1:]].mean(axis=1) # axis = 1 in columns, first column not counted
data_new['STD_DEV'] = data_new[data_new.columns[1:-1]].std(axis=1)
if mode == 'histogram':
histogram_flag = True
data_new = data.iloc[:, 1:-2].copy().stack() # Mean and variance columns not counted
else:
histogram_flag = False
if mode == 'scatter':
scatter_flag = True
else:
scatter_flag = False
Plotter.plot_any_key_in_log(data=data_new, index=index, key=key, exp_num=self.num,
scatter_flag=scatter_flag, save_flag=save_flag,
mean_stddev_flag=True,
histogram_flag=histogram_flag, save_path=save_path,
sub_log_dir_name=sub_log_dir_name, save_format=save_format, file_name=file_name)
| 6,691 | 45.151724 | 119 | py |
baconian-project | baconian-project-master/baconian/common/logging.py | import abc
import logging
import os
from baconian.common.misc import construct_dict_config
from baconian.common import files as files
from baconian.core.global_var import get_all
from baconian.config.global_config import GlobalConfig
from functools import wraps
from baconian.common.error import *
class BaseLogger(object):
required_key_dict = ()
def __init__(self):
self.inited_flag = False
@abc.abstractmethod
def close(self, *args, **kwargs):
raise NotImplementedError
@abc.abstractmethod
def init(self, *args, **kwargs):
raise NotImplementedError
class _SingletonConsoleLogger(BaseLogger):
"""
    A private class that should never be instantiated; it is used to implement the singleton design pattern for
ConsoleLogger
"""
ALLOWED_LOG_LEVEL = ('CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET')
ALLOWED_PRINT_TYPE = ('info', 'warning', 'debug', 'critical', 'log', 'critical', 'error')
def __init__(self):
super(_SingletonConsoleLogger, self).__init__()
self.name = None
self.logger = None
def init(self, to_file_flag, level: str, to_file_name: str = None, logger_name: str = 'console_logger'):
if self.inited_flag is True:
return
self.name = logger_name
if level not in self.ALLOWED_LOG_LEVEL:
raise ValueError('Wrong log level use {} instead'.format(self.ALLOWED_LOG_LEVEL))
self.logger = logging.getLogger(self.name)
self.logger.setLevel(getattr(logging, level))
for handler in self.logger.root.handlers[:] + self.logger.handlers[:]:
self.logger.removeHandler(handler)
self.logger.root.removeHandler(handler)
self.logger.addHandler(logging.StreamHandler())
if to_file_flag is True:
self.logger.addHandler(logging.FileHandler(filename=to_file_name))
for handler in self.logger.root.handlers[:] + self.logger.handlers[:]:
handler.setFormatter(fmt=logging.Formatter(fmt=GlobalConfig().DEFAULT_LOGGING_FORMAT))
handler.setLevel(getattr(logging, level))
self.inited_flag = True
def print(self, p_type: str, p_str: str, *arg, **kwargs):
if p_type not in self.ALLOWED_PRINT_TYPE:
raise ValueError('use print type from {}'.format(self.ALLOWED_PRINT_TYPE))
getattr(self.logger, p_type)(p_str, *arg, **kwargs)
self.flush()
def close(self):
self.flush()
for handler in self.logger.root.handlers[:] + self.logger.handlers[:]:
handler.close()
self.logger.removeHandler(handler)
self.logger.root.removeHandler(handler)
def reset(self):
self.close()
self.inited_flag = False
def flush(self):
for handler in self.logger.root.handlers[:] + self.logger.handlers[:]:
handler.flush()
class _SingletonLogger(BaseLogger):
"""
    A private class that should never be instantiated; it is used to implement the singleton design pattern for Logger
"""
def __init__(self):
super(_SingletonLogger, self).__init__()
self._registered_recorders = []
self._log_dir = None
self._config_file_log_dir = None
self._record_file_log_dir = None
self.logger_config = None
self.log_level = None
def init(self, config_or_config_dict,
log_path, log_level=None, **kwargs):
if self.inited_flag:
return
self._log_dir = log_path
# if os.path.exists(self._log_dir):
# raise FileExistsError('%s path is existed' % self._log_dir)
self._config_file_log_dir = os.path.join(self._log_dir, 'config')
self._record_file_log_dir = os.path.join(self._log_dir, 'record')
self.logger_config = construct_dict_config(config_or_config_dict, obj=self)
self.log_level = log_level
self.inited_flag = True
@property
def log_dir(self):
if os.path.exists(self._log_dir) is False:
os.makedirs(self._log_dir)
return self._log_dir
def flush_recorder(self, recorder=None):
if not recorder:
for re in self._registered_recorders:
self._flush(re)
else:
self._flush(recorder)
def close(self):
self._save_all_obj_final_status()
self.flush_recorder()
self._registered_recorders = []
def append_recorder(self, recorder):
self._registered_recorders.append(recorder)
def reset(self):
self.close()
for re in self._registered_recorders:
re.reset()
self._registered_recorders = []
self.inited_flag = False
def _flush(self, recorder):
if recorder.is_empty():
return
log_dict, by_status_flag = recorder.get_obj_log_to_flush(clear_obj_log_flag=True)
for obj_name, obj_log_dict in log_dict.items():
if by_status_flag is True:
for status, status_log_dict in obj_log_dict.items():
ConsoleLogger().print('info', 'save {}, with status: {} log into {}'.format(str(obj_name),
str(status),
os.path.join(
self._record_file_log_dir,
str(obj_name),
str(status))))
self.out_to_file(
file_path=os.path.join(self._record_file_log_dir, str(obj_name)),
content=status_log_dict,
file_name='{}.json'.format(status))
else:
ConsoleLogger().print('info', 'save {} log into {}'.format(str(obj_name),
os.path.join(
self._record_file_log_dir,
str(obj_name))))
self.out_to_file(file_path=os.path.join(self._record_file_log_dir, str(obj_name)),
content=obj_log_dict,
file_name='log.json')
def _save_all_obj_final_status(self):
final_status = dict()
for obj_name, obj in get_all()['_global_name_dict'].items():
if hasattr(obj, 'get_status') and callable(getattr(obj, 'get_status')):
tmp_dict = dict()
tmp_dict[obj_name] = dict()
for st in obj.STATUS_LIST:
obj.set_status(st)
tmp_dict[obj_name][st] = obj.get_status()
final_status = {**final_status, **tmp_dict}
ConsoleLogger().print('info', 'save final_status into {}'.format(os.path.join(
self._record_file_log_dir)))
self.out_to_file(file_path=os.path.join(self._record_file_log_dir),
content=final_status,
force_new=True,
file_name='final_status.json')
ConsoleLogger().print('info', 'save global_config into {}'.format(os.path.join(
self._record_file_log_dir)))
self.out_to_file(file_path=os.path.join(self._record_file_log_dir),
content=GlobalConfig().return_all_as_dict(),
force_new=True,
file_name='global_config.json')
@staticmethod
def out_to_file(file_path: str, content: (tuple, list, dict), file_name: str, force_new=False):
if len(content) == 0:
return
if force_new is True:
mode = 'w'
else:
mode = 'a'
if not os.path.exists(file_path):
os.makedirs(file_path)
mode = 'w'
try:
f = open(os.path.join(file_path, file_name), mode)
except FileNotFoundError:
f = open(os.path.join(file_path, file_name), 'w')
files.save_to_json(content, fp=f)
class Logger(object):
only_instance = None
def __new__(cls, *args, **kwargs):
if Logger.only_instance is None:
Logger.only_instance = _SingletonLogger()
return Logger.only_instance
class ConsoleLogger(object):
only_instance = None
def __new__(cls, *args, **kwargs):
if not ConsoleLogger.only_instance:
ConsoleLogger.only_instance = _SingletonConsoleLogger()
return ConsoleLogger.only_instance
class Recorder(object):
def __init__(self, flush_by_split_status=True, default_obj=None):
self._obj_log = {}
self._registered_log_attr_by_get_dict = {}
Logger().append_recorder(self)
self.flush_by_split_status = flush_by_split_status
self._default_obj = default_obj
def append_to_obj_log(self, obj, attr_name: str, status_info: dict, value):
assert hasattr(obj, 'name')
if obj not in self._obj_log:
self._obj_log[obj] = {}
if attr_name not in self._obj_log[obj]:
self._obj_log[obj][attr_name] = []
self._obj_log[obj][attr_name].append(dict(**status_info, attr_name=attr_name, value=value))
def get_log(self, attr_name: str, filter_by_status: dict = None, obj=None):
if obj is None:
obj = self._default_obj
if obj not in self._obj_log:
raise LogItemNotExisted('object {} has no records in this recorder'.format(obj))
if attr_name not in self._obj_log[obj]:
raise LogItemNotExisted('no log item {} found at object {} recorder'.format(attr_name, obj))
record = self._obj_log[obj][attr_name]
if filter_by_status is not None:
# TODO reduce the time complexity of the code snippet
filtered_record = []
for r in record:
not_equal_flag = False
for key in filter_by_status.keys():
if key in r and r[key] != filter_by_status[key]:
not_equal_flag = True
if not not_equal_flag:
filtered_record.append(r)
return filtered_record
else:
return record
def is_empty(self):
return len(self._obj_log) == 0
def record(self):
self._record_by_getter()
def register_logging_attribute_by_record(self, obj, attr_name: str, static_flag: bool,
get_method=None):
"""
        register an attribute that will be recorded periodically during training, duplicated registrations will be ignored
:param obj:
:param attr_name:
:param static_flag:
:param get_method:
:return:
"""
if not hasattr(obj, 'get_status') or not callable(obj.get_status):
            raise ValueError('registered obj {} must have callable method get_status()'.format(type(obj)))
if obj not in self._registered_log_attr_by_get_dict:
self._registered_log_attr_by_get_dict[obj] = {}
if attr_name in self._registered_log_attr_by_get_dict[obj]:
return
self._registered_log_attr_by_get_dict[obj][attr_name] = dict(obj=obj,
attr_name=attr_name,
get_method=get_method,
static_flag=static_flag)
def _filter_by_main_status(self, clear_obj_log_flag=True):
filtered_res = dict()
for obj in self._obj_log:
filtered_res[obj.name] = dict()
status_list = obj.status_list
for stat in status_list:
filtered_res[obj.name][stat] = dict()
for attr in self._obj_log[obj]:
filtered_res[obj.name][stat][attr] = []
for attr in self._obj_log[obj]:
for val_dict in self._obj_log[obj][attr]:
filtered_res[obj.name][val_dict['status']][val_dict['attr_name']].append(val_dict)
filtered_res[obj.name][val_dict['status']][attr][-1].pop('attr_name')
if clear_obj_log_flag is True:
del self._obj_log
self._obj_log = {}
return filtered_res
def get_obj_log_to_flush(self, clear_obj_log_flag) -> (dict, bool):
if self.flush_by_split_status is True:
return self._filter_by_main_status(clear_obj_log_flag), self.flush_by_split_status
else:
filtered_res = {}
for obj in self._obj_log:
filtered_res[obj.name] = dict()
for attr in self._obj_log[obj]:
filtered_res[obj.name][attr] = []
for val_dict in self._obj_log[obj][attr]:
val_dict.pop('attr_name')
filtered_res[obj.name][attr].append(val_dict)
del self._obj_log
self._obj_log = {}
return filtered_res, self.flush_by_split_status
def reset(self):
self._obj_log = {}
self._registered_log_attr_by_get_dict = {}
def flush(self):
Logger().flush_recorder(recorder=self)
def _record_by_getter(self):
for key, obj_dict in self._registered_log_attr_by_get_dict.items():
for _, val in obj_dict.items():
if val['get_method'] is None:
res = val['obj'].__getattribute__(val['attr_name'])
else:
res = val['get_method'](val)
self.append_to_obj_log(obj=val['obj'], attr_name=val['attr_name'], status_info=val['obj'].get_status(),
value=res)
def record_return_decorator(which_recorder: str = 'global'):
def wrap(fn):
@wraps(fn)
def wrap_with_self(self, *args, **kwargs):
obj = self
if which_recorder == 'global':
recorder = get_global_recorder()
elif which_recorder == 'self':
recorder = getattr(obj, 'recorder')
else:
                raise ValueError('Not supported recorder indicator: {}, use {}'.format(which_recorder, 'global, self'))
if not hasattr(obj, 'get_status') or not callable(obj.get_status):
                raise ValueError('registered obj {} must have callable method get_status()'.format(type(obj)))
res = fn(self, *args, **kwargs)
if res is not None:
info = obj.get_status()
if not isinstance(res, dict):
raise TypeError('returned value by {} must be a dict in order to be recorded'.format(fn.__name__))
for key, val in res.items():
recorder.append_to_obj_log(obj=obj, attr_name=key, status_info=info, value=val)
return res
return wrap_with_self
return wrap
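# A minimal sketch of the intended usage (hypothetical class shown): the decorated method
# must return a dict, and every key/value pair is appended to the chosen recorder together
# with the object's current status info.
#   class MyAlgo(SomeBaconianObjectWithRecorder):
#       @record_return_decorator(which_recorder='self')
#       def train(self):
#           ...
#           return dict(average_loss=0.1)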
_global_recorder = Recorder()
def get_global_recorder() -> Recorder:
return globals()['_global_recorder']
def reset_global_recorder():
globals()['_global_recorder'].reset()
def reset_logging():
Logger().reset()
ConsoleLogger().reset()
reset_global_recorder()
| 15,588 | 39.281654 | 126 | py |
baconian-project | baconian-project-master/baconian/common/misc.py | import numpy as np
__all__ = ['generate_n_actions_hot_code', 'repeat_ndarray', 'construct_dict_config']
def generate_n_actions_hot_code(n):
res = np.arange(0, n)
action = np.zeros([n, n])
action[res, res] = 1
return action
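# e.g. generate_n_actions_hot_code(3) -> [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]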
def repeat_ndarray(np_array: np.ndarray, repeats):
np_array = np.expand_dims(np_array, axis=0)
np_array = np.repeat(np_array, axis=0, repeats=repeats)
return np_array
from baconian.config.dict_config import DictConfig
def construct_dict_config(config_or_config_dict, obj):
if isinstance(config_or_config_dict, dict):
return DictConfig(required_key_dict=obj.required_key_dict,
config_dict=config_or_config_dict,
cls_name=type(obj).__name__)
    elif isinstance(config_or_config_dict, DictConfig):
return config_or_config_dict
else:
raise TypeError('Type {} is not supported, use dict or Config'.format(type(config_or_config_dict).__name__))
| 978 | 30.580645 | 116 | py |
baconian-project | baconian-project-master/baconian/common/noise.py | """
From openai baselines
"""
import numpy as np
from typeguard import typechecked
from baconian.common.schedules import Scheduler
class AdaptiveParamNoiseSpec(object):
def __init__(self, initial_stddev=0.1, desired_action_stddev=0.1, adoption_coefficient=1.01):
self.initial_stddev = initial_stddev
self.desired_action_stddev = desired_action_stddev
self.adoption_coefficient = adoption_coefficient
self.current_stddev = initial_stddev
def adapt(self, distance):
if distance > self.desired_action_stddev:
# Decrease stddev.
self.current_stddev /= self.adoption_coefficient
else:
# Increase stddev.
self.current_stddev *= self.adoption_coefficient
def get_stats(self):
stats = {
'param_noise_stddev': self.current_stddev,
}
return stats
def __repr__(self):
fmt = 'AdaptiveParamNoiseSpec(initial_stddev={}, desired_action_stddev={}, adoption_coefficient={})'
return fmt.format(self.initial_stddev, self.desired_action_stddev, self.adoption_coefficient)
class ActionNoise(object):
def reset(self):
pass
def __call__(self, *args, **kwargs):
return 0.0
class NormalActionNoise(ActionNoise):
def __init__(self, mu=0.0, sigma=1.0):
self.mu = mu
self.sigma = sigma
def __call__(self):
return np.random.normal(self.mu, self.sigma)
def __repr__(self):
return 'NormalActionNoise(mu={}, sigma={})'.format(self.mu, self.sigma)
# Based on http://math.stackexchange.com/questions/1287634/implementing-ornstein-uhlenbeck-in-matlab
class OrnsteinUhlenbeckActionNoise(ActionNoise):
def __init__(self, mu, sigma, theta=.15, dt=1e-2, x0=None):
self.theta = theta
self.mu = mu
self.sigma = sigma
self.dt = dt
self.x0 = x0
self.reset()
def __call__(self):
x = self.x_prev + self.theta * (self.mu - self.x_prev) * self.dt + self.sigma * np.sqrt(
self.dt) * np.random.normal(size=self.mu.shape)
self.x_prev = x
return x
def reset(self):
self.x_prev = self.x0 if self.x0 is not None else np.zeros_like(self.mu)
def __repr__(self):
return 'OrnsteinUhlenbeckActionNoise(mu={}, sigma={})'.format(self.mu, self.sigma)
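# Usage sketch for exploration in continuous control (hypothetical shapes):
#   ou = OrnsteinUhlenbeckActionNoise(mu=np.zeros(2), sigma=0.2 * np.ones(2))
#   noisy_action = action + ou()   # successive calls return temporally correlated noise
#   ou.reset()                     # typically called at the start of each episode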
class UniformNoise(ActionNoise):
def __init__(self, scale):
self.scale = scale
def __call__(self):
uniformNoiseValue = self.scale * (np.random.rand() - 0.5)
return uniformNoiseValue
class OUNoise(ActionNoise):
def __init__(self, theta=0.05, sigma=0.25, init_state=0.0):
self.theta = theta
self.sigma = sigma
self.state = init_state
def __call__(self):
state = self.state - self.theta * self.state + self.sigma * np.random.randn()
self.state = state
return self.state
def reset(self):
self.state = 0.0
class AgentActionNoiseWrapper(object):
INJECT_TYPE = ['WEIGHTED_DECAY', '']
@typechecked
def __init__(self, noise: ActionNoise, action_weight_scheduler: Scheduler, noise_weight_scheduler: Scheduler):
self.action_weight_scheduler = action_weight_scheduler
self.noise_weight_scheduler = noise_weight_scheduler
self.noise = noise
def __call__(self, action, **kwargs):
noise = self.noise()
return self.action_weight_scheduler.value() * action + self.noise_weight_scheduler.value() * noise
def reset(self):
self.noise.reset()
| 3,558 | 29.161017 | 114 | py |
baconian-project | baconian-project-master/baconian/common/random.py | # import numpy as np
# import time
# import typeguard as tg
# import functools
#
#
# def random_snapshot(func):
# @functools.wraps(func)
# def wrapper(random_instance):
# random_instance.state_snapshot = random_instance._np_random.get_state()
# return func(random_instance)
#
# return wrapper
#
#
# class Random(object):
# """
#
# A random utility that based on Numpy random module in order to better control the randomness of the system.
# The python random is not recommended to use
#
# """
#
# @tg.typechecked
# def __init__(self, seed: int = int(round(time.time() * 1000)) % (2 ** 32 - 1), global_state: bool = True):
# self.seed = seed
# if global_state:
# self._np_random = np.random
# else:
# self._np_random = np.random.RandomState()
# self._np_random.seed(seed)
# self.state_snapshot = self._np_random.get_state()
#
# @random_snapshot
# def unwrapped(self):
# return self._np_random
#
# @tg.typechecked
# def set_seed(self, seed: int):
# self.seed = seed
# self._np_random.seed(seed)
#
# @tg.typechecked
# def set_state(self, state: tuple):
# self._np_random.set_state(state)
#
# def reset_last_state(self):
# self.set_state(self.state_snapshot)
#
# def _register_all_np_method(self):
# raise NotImplementedError
#
#
# if __name__ == '__main__':
# r = Random()
# print(r.unwrapped().rand(1, 1))
# r.reset_last_state()
# print(r.unwrapped().rand(1, 1))
| 1,573 | 25.677966 | 113 | py |
baconian-project | baconian-project-master/baconian/common/__init__.py | 0 | 0 | 0 | py |
|
baconian-project | baconian-project-master/baconian/common/plotter.py | import numpy as np
from matplotlib import pyplot as plt
import json
import seaborn as sns
sns.set_style('whitegrid')
class Plotter(object):
markers = ('+', 'x', 'v', 'o', '^', '<', '>', 's', 'p', '*', 'h', 'H', 'D', 'd', 'P', 'X')
color_list = ['b', 'r', 'g', 'm', 'y', 'k', 'cyan', 'plum', 'darkgreen', 'darkorange', 'oldlace', 'chocolate',
'purple', 'lightskyblue', 'gray', 'seagreen', 'antiquewhite',
'snow', 'darkviolet', 'brown', 'skyblue', 'mediumaquamarine', 'midnightblue', 'darkturquoise',
'sienna', 'lightsteelblue', 'gold', 'teal', 'blueviolet', 'mistyrose', 'seashell', 'goldenrod',
'forestgreen', 'aquamarine', 'linen', 'deeppink', 'darkslategray', 'mediumseagreen', 'dimgray',
'mediumpurple', 'lightgray', 'khaki', 'dodgerblue', 'papayawhip', 'salmon', 'floralwhite',
'lightpink', 'gainsboro', 'coral', 'indigo', 'darksalmon', 'royalblue', 'navy', 'orangered',
'cadetblue', 'orchid', 'palegreen', 'magenta', 'honeydew', 'darkgray', 'palegoldenrod', 'springgreen',
'lawngreen', 'palevioletred', 'olive', 'red', 'lime', 'yellowgreen', 'aliceblue', 'orange',
'chartreuse', 'lavender', 'paleturquoise', 'blue', 'azure', 'yellow', 'aqua', 'mediumspringgreen',
'cornsilk', 'lightblue', 'steelblue', 'violet', 'sandybrown', 'wheat', 'greenyellow', 'darkred',
'mediumslateblue', 'lightseagreen', 'darkblue', 'moccasin', 'lightyellow', 'turquoise', 'tan',
'mediumvioletred', 'mediumturquoise', 'limegreen', 'slategray', 'lightslategray', 'mintcream',
'darkgreen', 'white', 'mediumorchid', 'firebrick', 'bisque', 'darkcyan', 'ghostwhite', 'powderblue',
'tomato', 'lavenderblush', 'darkorchid', 'cornflowerblue', 'plum', 'ivory', 'darkgoldenrod', 'green',
'burlywood', 'hotpink', 'cyan', 'silver', 'peru', 'thistle', 'indianred', 'olivedrab',
'lightgoldenrodyellow', 'maroon', 'black', 'crimson', 'darkolivegreen', 'lightgreen', 'darkseagreen',
'lightcyan', 'saddlebrown', 'deepskyblue', 'slateblue', 'whitesmoke', 'pink', 'darkmagenta',
'darkkhaki', 'mediumblue', 'beige', 'blanchedalmond', 'lightsalmon', 'lemonchiffon', 'navajowhite',
'darkslateblue', 'lightcoral', 'rosybrown', 'fuchsia', 'peachpuff']
def plot_fig(self, fig_num, col_id, x, y, title, x_label, y_label, label=' ', marker='*'):
plt.figure(fig_num, figsize=(6, 5))
plt.title(title)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
plt.tight_layout()
marker_every = max(int(len(x) / 10), 1)
if len(np.array(y).shape) > 1:
new_shape = np.array(y).shape
res = np.reshape(np.reshape(np.array([y]), newshape=[-1]), newshape=[new_shape[1], new_shape[0]],
order='F').tolist()
res = list(res)
for i in range(len(res)):
res_i = res[i]
plt.subplot(len(res), 1, i + 1)
plt.title(title + '_' + str(i))
plt.plot(x, res_i, self.color_list[col_id], label=label + '_' + str(i), marker=marker,
markevery=marker_every, markersize=6, linewidth=1)
col_id += 1
else:
plt.plot(x, y, self.color_list[col_id], label=label, marker=marker, markevery=marker_every, markersize=6,
linewidth=1)
plt.legend()
@staticmethod
def plot_any_key_in_log(data, key, index, exp_num=1,
sub_log_dir_name=None,
scatter_flag=False,
histogram_flag=False,
save_flag=False,
save_path=None,
save_format='png',
file_name=None,
separate_exp_flag=True,
mean_stddev_flag=False,
path_list=None,
):
"""
:param data: a pandas DataFrame containing (index and) key columns
:param key: in y-axis, the variable to plot, assigned by user
:param index: in x-axis, the argument assigned by user
:param sub_log_dir_name: the sub-directory which the log file to plot is saved in
:param exp_num: [optional] the number of experiments to visualize
:param scatter_flag: [optional] draw scatter plot if true
:param histogram_flag: [optional] draw histogram if true
:param save_flag: [optional] save the figure to a file if true
:param save_path: [optional] save path of figure, assigned by user, the directory of log_path by default
:param save_format: [optional] format of figure to save, png by default
        :param file_name: [optional] the file name of the file to save, key_VERSUS_index by default
:param separate_exp_flag: [optional] plot the results of each experiment separately if true
:param mean_stddev_flag: [optional] plot the mean value of multiple experiment results and standard deviation
:param path_list: [optional] the list of save paths assigned by users, figure file will be saved to each path
:return:
"""
marker_every = max(int(data.shape[0] / 10), 1)
# plt.figure()
fig, ax = plt.subplots(1)
if separate_exp_flag is True:
for i in range(exp_num):
if scatter_flag is True:
ax.scatter(data[index], data.iloc[:, i + 1], lw=1, label=key + '_' + str(i),
c=Plotter.color_list[i], alpha=0.8, )
elif histogram_flag is True:
num_bins = 20
n, bins, patches = ax.hist(x=data[0], bins=num_bins)
else:
ax.plot(data[index], data.iloc[:, i + 1], lw=1, label=key + '_' + str(i),
color=Plotter.color_list[i],
marker=Plotter.markers[i], markevery=marker_every, markersize=6, )
if mean_stddev_flag is True:
if histogram_flag is not True:
ax.plot(data[index], data['MEAN'], lw=3, label='MEAN', color='silver')
ax.fill_between(data[index], data['MEAN'] + data['STD_DEV'], data['MEAN'] - data['STD_DEV'],
facecolor='silver', alpha=0.5)
plt.title(sub_log_dir_name)
lgd = ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.2), shadow=True, ncol=3)
if histogram_flag is not True:
plt.xlabel(index)
plt.ylabel(key)
else:
plt.xlabel(key)
plt.ylabel('count')
plt.legend()
# Save the figure to a file to a path or paths in a list
if save_flag is True:
if file_name is None:
file_name = '/%s_VERSUS_%s' % (key, index)
if path_list is not None:
for path in path_list:
plt.savefig(path + '/%s.%s' % (file_name, save_format), bbox_extra_artists=(lgd,),
bbox_inches='tight', format=save_format)
print("Save plot figure to {path} as {file_name}".format(path=path,
file_name='%s.%s' % (
file_name, save_format)))
if save_path is not None:
plt.savefig(save_path + '/%s.%s' % (file_name, save_format), bbox_extra_artists=(lgd,),
bbox_inches='tight', format=save_format)
print("Save plot figure to {path} as {file_name}".format(path=save_path,
file_name='%s.%s' % (
file_name, save_format)))
plt.show()
@staticmethod
def plot_any_scatter_in_log(res_dict, res_name, file_name, key, index, op, scatter_flag=False, save_flag=False,
save_path=None,
fig_id=4, label='', restrict_dict=None):
with open(res_dict[res_name], 'r') as f:
path_list = json.load(f)
plt.figure(fig_id)
plt.title("%s_%s_%s" % (res_name, file_name, key))
plt.xlabel('index')
plt.ylabel(key)
for i in range(len(path_list)):
test_reward = []
real_env_sample_count_index = []
with open(file=path_list[i] + '/loss/' + file_name, mode='r') as f:
test_data = json.load(fp=f)
for sample in test_data:
if restrict_dict is not None:
flag = True
for re_key, re_value in restrict_dict.items():
if sample[re_key] != re_value:
flag = False
if flag is True:
test_reward.append(sample[key])
real_env_sample_count_index.append(sample[index])
else:
test_reward.append(sample[key])
real_env_sample_count_index.append(sample[index])
test_reward, real_env_sample_count_index = op(test_reward, real_env_sample_count_index)
x_keys = []
y_values = []
last_key = real_env_sample_count_index[0]
last_set = []
for j in range(len(real_env_sample_count_index)):
if real_env_sample_count_index[j] == last_key:
last_set.append(test_reward[j])
else:
x_keys.append(last_key)
y_values.append(last_set)
last_key = real_env_sample_count_index[j]
last_set = [test_reward[j]]
x_keys.append(last_key)
y_values.append(last_set)
y_values_mean = [np.mean(y_values[j]) for j in range(len(y_values))]
if scatter_flag is True:
plt.scatter(x_keys, y_values_mean, c=Plotter.color_list[i], label=key + label + str(i),
marker=Plotter.markers[i])
else:
plt.plot(x_keys, y_values_mean, c=Plotter.color_list[i], label=key + label + str(i),
marker=Plotter.markers[i])
plt.legend()
if save_flag is True:
for path in path_list:
plt.savefig(path + '/%s_%s.png' % (file_name, key))
if save_path is not None:
plt.savefig(save_path)
print()
plt.show()
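if __name__ == '__main__':
    # Usage sketch added for illustration (not part of the original file): draw a single
    # curve with Plotter.plot_fig. The data here is synthetic and only meant as a demo.
    demo_x = np.arange(100)
    demo_y = np.sin(demo_x / 10.0)
    Plotter().plot_fig(fig_num=1, col_id=0, x=demo_x, y=demo_y,
                       title='demo_curve', x_label='step', y_label='value', label='sin')
    plt.show()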
| 10,985 | 53.656716 | 120 | py |
baconian-project | baconian-project-master/baconian/common/files.py | import json_tricks as json
import os
import shutil
from baconian.common.error import *
def create_path(path, del_if_existed=True):
if os.path.exists(path) is True and del_if_existed is False:
raise FileExistsError()
else:
try:
shutil.rmtree(path)
except FileNotFoundError:
pass
os.makedirs(path)
def load_json(file_path) -> (dict, list):
with open(file_path, 'r') as f:
res = json.load(f)
return res
def check_dir(path):
if os.path.isdir(path) is False:
raise LogPathOrFileNotExistedError('{} not existed'.format(path))
def check_file(path):
if os.path.isfile(path) is False:
raise LogPathOrFileNotExistedError('{} not existed'.format(path))
def save_to_json(obj: (list, dict), path=None, fp=None, file_name=None):
jsonable_dict = convert_to_jsonable(dict_or_list=obj)
if fp:
json.dump(obj=jsonable_dict, fp=fp, indent=4, sort_keys=True)
fp.close()
else:
if file_name is not None:
path = os.path.join(path, file_name)
with open(path, 'w') as f:
json.dump(obj=obj, fp=f, indent=4, sort_keys=True)
def convert_to_jsonable(dict_or_list) -> (list, dict):
if isinstance(dict_or_list, list):
jsonable_dict = []
for val in dict_or_list:
if isinstance(val, (dict, list)):
res = convert_to_jsonable(dict_or_list=val)
jsonable_dict.append(res)
else:
f = open(os.devnull, 'w')
try:
json.dump([val], f)
except Exception:
jsonable_dict.append(str(val))
else:
jsonable_dict.append(val)
finally:
f.close()
return jsonable_dict
elif isinstance(dict_or_list, dict):
jsonable_dict = dict()
for key, val in dict_or_list.items():
if isinstance(val, (dict, list)):
res = convert_to_jsonable(dict_or_list=val)
jsonable_dict[key] = res
else:
f = open(os.devnull, 'w')
try:
json.dump([val], f)
except Exception:
jsonable_dict[key] = str(val)
else:
jsonable_dict[key] = val
finally:
f.close()
return jsonable_dict
def convert_dict_to_csv(log_dict):
"""
    This function will convert a log dict into csv files by recursively finding each list in the
    dict and saving that list, keyed by its name, as a single csv file.
    :param log_dict: the log dict to convert
    :return: a list of dicts, each with keys
    {'csv_file_name', 'csv_keys', 'csv_row_data'}
"""
raise NotImplementedError
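if __name__ == '__main__':
    # Usage sketch added for illustration (not part of the original file): dump a small
    # dict to a json file, and show how convert_to_jsonable falls back to str() for
    # values that json cannot serialize.
    import tempfile
    demo = {'name': 'run_0', 'params': [1, 2, 3]}
    save_to_json(demo, path=tempfile.mkdtemp(), file_name='demo.json')
    print(convert_to_jsonable({'env': object()}))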
| 2,826 | 29.397849 | 91 | py |
baconian-project | baconian-project-master/baconian/common/data_pre_processing.py | """
A scikit-learn-like module for handling data pre-processing, including normalization and standardization.
"""
import numpy as np
from baconian.common.error import *
class DataScaler(object):
def __init__(self, dims):
self.data_dims = dims
def _check_scaler(self, scaler) -> bool:
if len(scaler.shape) != 1 or scaler.shape[0] != self.data_dims:
return False
else:
return True
def _compute_stat_of_batch_data(self, data):
data = np.array(data)
if self._check_data(data) is False:
raise ShapeNotCompatibleError("data shape is not compatible")
else:
return np.min(data, axis=0), np.max(data, axis=0), np.mean(data, axis=0), np.var(data, axis=0)
def _check_data(self, data) -> bool:
if len(data.shape) != 2 or data.shape[1] != self.data_dims:
return False
else:
return True
def process(self, data):
raise NotImplementedError
def inverse_process(self, data):
raise NotImplementedError
def update_scaler(self, data):
pass
class IdenticalDataScaler(DataScaler):
def __init__(self, dims):
self.data_dims = dims
def process(self, data):
return data
def inverse_process(self, data):
return data
class MinMaxScaler(DataScaler):
def __init__(self, dims: int, desired_range: tuple = None):
super().__init__(dims)
self._min = np.zeros([dims])
self._max = np.ones([dims])
if desired_range is None:
self._desired_range = (np.zeros(dims), np.ones(dims))
else:
if len(desired_range) != 2 or self._check_scaler(np.array(desired_range[0])) is False or self._check_scaler(
np.array(desired_range[1])) is False:
raise ShapeNotCompatibleError("desired value dims is not compatible with dims")
self._desired_range = (np.array(desired_range[0]), np.array(desired_range[1]))
def process(self, data):
return (data - self._min) / (self._max - self._min) * (self._desired_range[1] - self._desired_range[0]) + \
self._desired_range[0]
def get_param(self):
return dict(min=self._min.tolist(),
max=self._max.tolist(),
desired_range=np.array(self._desired_range).tolist())
def set_param(self, min=None, max=None, desired_range=None):
if self._check_scaler(min):
self._min = min
else:
            raise ShapeNotCompatibleError('the shape of the min/max range does not match the scaler dims')
if self._check_scaler(max):
self._max = max
else:
            raise ShapeNotCompatibleError('the shape of the min/max range does not match the scaler dims')
        if len(desired_range) != 2 or self._check_scaler(np.array(desired_range[0])) is False or self._check_scaler(
np.array(desired_range[1])) is False:
raise ShapeNotCompatibleError("desired value dims is not compatible with dims")
else:
self._desired_range = (np.array(desired_range[0]), np.array(desired_range[1]))
def inverse_process(self, data):
if np.greater(np.array(data), self._desired_range[1]).any() or np.less(data, self._desired_range[0]).any():
raise WrongValueRangeError('data for inverse process not in the range {} {}'.format(self._desired_range[0],
self._desired_range[1]))
return (np.array(data) - self._desired_range[0]) / (self._desired_range[1] - self._desired_range[0]) * \
(self._max - self._min) + self._min
class RunningMinMaxScaler(MinMaxScaler):
"""
    A scaler that keeps a running min and max over all data passed to update_scaler and scales data to a desired range
"""
def __init__(self, dims: int, desired_range: tuple = None, init_data: np.ndarray = None,
init_min: np.ndarray = None,
init_max: np.ndarray = None):
super().__init__(dims=dims, desired_range=desired_range)
        if init_min is not None and init_max is not None:
self._min = np.array(init_min)
self._max = np.array(init_max)
elif init_data is not None:
self._min, self._max, _, _ = self._compute_stat_of_batch_data(init_data)
if self._check_scaler(self._min) is False or self._check_scaler(self._max) is False:
            raise ShapeNotCompatibleError('the shape of the min/max range does not match the scaler dims')
def update_scaler(self, data):
if len(np.array(data)) == 0:
return
if self._min is not None:
self._min = np.minimum(np.min(data, axis=0), self._min)
else:
self._min = np.min(data, axis=0)
if self._max is not None:
self._max = np.maximum(np.max(data, axis=0), self._max)
else:
self._max = np.max(data, axis=0)
class BatchMinMaxScaler(MinMaxScaler):
def process(self, data):
if self._check_data(data) is False:
            raise ShapeNotCompatibleError("data is not compatible with scaler, make sure to only scale box-type data")
self._min, self._max, _, _ = self._compute_stat_of_batch_data(data)
return super().process(data)
class StandardScaler(DataScaler):
def __init__(self, dims: int):
super().__init__(dims)
self._var = np.ones([dims])
self._mean = np.zeros([dims])
self._data_count = 0
self._epsilon = 0.01
def process(self, data):
return (np.array(data) - self._mean) / (np.sqrt(self._var) + self._epsilon)
def inverse_process(self, data):
return np.array(data) * (np.sqrt(self._var) + self._epsilon) + self._mean
def get_param(self):
return dict(mean=self._mean.tolist(),
var=self._var.tolist())
class BatchStandardScaler(StandardScaler):
def process(self, data):
if self._check_data(data) is False:
            raise ShapeNotCompatibleError("data is not compatible with scaler")
_, _, self._mean, self._var = self._compute_stat_of_batch_data(data)
return super().process(data)
class RunningStandardScaler(StandardScaler):
"""
    A scaler that keeps a running mean and variance over all data passed to the scaler and standardizes data to
    zero mean and unit variance.
"""
def __init__(self, dims: int, init_data: np.ndarray = None,
init_mean: np.ndarray = None,
init_var: np.ndarray = None,
init_mean_var_data_count=None):
super().__init__(dims)
if init_mean is not None and init_var is not None:
self._mean = init_mean
self._var = init_var
self._data_count = init_mean_var_data_count
elif init_data is not None:
_, _, self._mean, self._var = self._compute_stat_of_batch_data(init_data)
self._data_count = np.array(init_data).shape[0]
def update_scaler(self, data):
if len(np.array(data)) == 0:
return
if self._mean is None or self._var is None:
_, _, self._mean, self._var = self._compute_stat_of_batch_data(data)
self._data_count = data.shape[0]
else:
n = data.shape[0]
new_data_var = np.var(data, axis=0)
new_data_mean = np.mean(data, axis=0)
new_data_mean_sq = np.square(new_data_mean)
new_means = ((self._mean * self._data_count) + (new_data_mean * n)) / (self._data_count + n)
self._var = (((self._data_count * (self._var + np.square(self._mean))) +
(n * (new_data_var + new_data_mean_sq))) / (self._data_count + n) -
np.square(new_means))
self._var = np.maximum(0.0, self._var)
self._mean = new_means
self._data_count += n
def set_param(self, mean=None, var=None):
assert np.array(mean).shape == self._mean.shape, 'new mean shape is changed'
assert np.array(var).shape == self._var.shape, 'new var shape is changed'
self._mean = mean
self._var = var
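if __name__ == '__main__':
    # Usage sketch added for illustration (not part of the original file): keep running
    # statistics over incoming batches and standardize new data with them.
    scaler = RunningStandardScaler(dims=3)
    batch = np.random.randn(100, 3) * 2.0 + 1.0
    scaler.update_scaler(batch)
    print(scaler.process(batch).mean(axis=0), scaler.process(batch).var(axis=0))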
| 8,243 | 38.444976 | 120 | py |
baconian-project | baconian-project-master/baconian/common/sampler/sampler.py | from baconian.core.core import Basic, Env
from baconian.common.sampler.sample_data import TransitionData, TrajectoryData
from typeguard import typechecked
import numpy as np
class Sampler(object):
"""
    Sampler module that handles the sampling procedure for training/testing of the agent.
"""
@staticmethod
@typechecked
def sample(env: Env,
agent,
sample_count: int,
sample_type='transition',
reset_at_start=None) -> (TransitionData, TrajectoryData):
"""
        A static method that samples data from the environment with the agent's policy.
        :param env: environment object to sample from.
        :param agent: agent object that offers the sampling policy.
        :param sample_count: number of samples. If sample_type == "transition", this value means the number of
                             transitions, which is typical for off-policy methods like DQN and DDPG. If sample_type ==
                             "trajectory", it means the number of trajectories.
        :param sample_type: a string, "transition" or "trajectory".
        :param reset_at_start: a bool; if True, reset the environment at the beginning, if False, continue sampling
                               from the previous state (this is useful for tasks where the previous state must be
                               preserved to reach the terminal goal state). If None, it is set to False for
                               sample_type == "transition" and to True for sample_type == "trajectory".
:return: SampleData object.
"""
state = None
if reset_at_start is True or (reset_at_start is None and sample_type == 'trajectory'):
state = env.reset()
elif reset_at_start is False or (reset_at_start is None and sample_type == 'transition'):
state = env.get_state()
if sample_type == 'transition':
return Sampler._sample_transitions(env, agent, sample_count, state)
elif sample_type == 'trajectory':
return Sampler._sample_trajectories(env, agent, sample_count, state)
else:
raise ValueError()
@staticmethod
def _sample_transitions(env: Env, agent, sample_count, init_state):
state = init_state
sample_record = TransitionData(env_spec=env.env_spec)
for i in range(sample_count):
action = agent.predict(obs=state)
new_state, re, done, info = env.step(action)
if not isinstance(done, bool):
raise TypeError()
sample_record.append(state=state,
action=action,
reward=re,
new_state=new_state,
done=done)
if done:
state = env.reset()
agent.reset_on_terminal_state()
else:
state = new_state
return sample_record
@staticmethod
def _sample_trajectories(env, agent, sample_count, init_state):
state = init_state
sample_record = TrajectoryData(env.env_spec)
done = False
for i in range(sample_count):
traj_record = TransitionData(env.env_spec)
while done is not True:
action = agent.predict(obs=state)
new_state, re, done, info = env.step(action)
if not isinstance(done, bool):
raise TypeError()
traj_record.append(state=state,
action=action,
reward=re,
new_state=new_state,
done=done)
state = new_state
agent.reset_on_terminal_state()
done = False
state = env.reset()
sample_record.append(traj_record)
return sample_record
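# Usage sketch added for illustration (not part of the original file); `env` and `agent`
# are assumed to be already-constructed baconian Env and Agent objects:
#
#     transitions = Sampler.sample(env=env, agent=agent, sample_count=100,
#                                  sample_type='transition')
#     trajectories = Sampler.sample(env=env, agent=agent, sample_count=5,
#                                   sample_type='trajectory')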
| 4,178 | 42.53125 | 125 | py |
baconian-project | baconian-project-master/baconian/common/sampler/sample_data.py | from baconian.common.special import *
from baconian.core.core import EnvSpec
from copy import deepcopy
import typeguard as tg
from baconian.common.error import *
class SampleData(object):
def __init__(self, env_spec: EnvSpec = None, obs_shape=None, action_shape=None):
if env_spec is None and (obs_shape is None or action_shape is None):
raise ValueError('At least env_spec or (obs_shape, action_shape) should be passed in')
self.env_spec = env_spec
self.obs_shape = env_spec.obs_shape if env_spec else obs_shape
self.action_shape = env_spec.action_shape if env_spec else action_shape
def reset(self):
raise NotImplementedError
def append(self, *args, **kwargs):
raise NotImplementedError
def union(self, sample_data):
raise NotImplementedError
def get_copy(self):
raise NotImplementedError
def __call__(self, set_name, **kwargs):
raise NotImplementedError
def append_new_set(self, name, data_set: (list, np.ndarray), shape: (tuple, list)):
raise NotImplementedError
def sample_batch(self, *args, **kwargs):
raise NotImplementedError
def apply_transformation(self, set_name, func, *args, **kwargs):
raise NotImplementedError
def apply_op(self, set_name, func, *args, **kwargs):
raise NotImplementedError
class TransitionData(SampleData):
def __init__(self, env_spec: EnvSpec = None, obs_shape=None, action_shape=None):
super(TransitionData, self).__init__(env_spec=env_spec, obs_shape=obs_shape, action_shape=action_shape)
self.cumulative_reward = 0.0
self.step_count_per_episode = 0
assert isinstance(self.obs_shape, (list, tuple))
assert isinstance(self.action_shape, (list, tuple))
self.obs_shape = list(self.obs_shape)
self.action_shape = list(self.action_shape)
self._internal_data_dict = {
'state_set': [np.empty([0] + self.obs_shape), self.obs_shape],
'new_state_set': [np.empty([0] + self.obs_shape), self.obs_shape],
'action_set': [np.empty([0] + self.action_shape), self.action_shape],
'reward_set': [np.empty([0]), []],
'done_set': [np.empty([0], dtype=bool), []]
}
self.current_index = 0
def __len__(self):
return len(self._internal_data_dict['state_set'][0])
def __call__(self, set_name, **kwargs):
if set_name not in self._allowed_data_set_keys:
raise ValueError('pass in set_name within {} '.format(self._allowed_data_set_keys))
return make_batch(self._internal_data_dict[set_name][0],
original_shape=self._internal_data_dict[set_name][1])
def reset(self):
for key, data_set in self._internal_data_dict.items():
self._internal_data_dict[key][0] = np.empty([0, *self._internal_data_dict[key][1]])
self.cumulative_reward = 0.0
self.step_count_per_episode = 0
def append(self, state: np.ndarray, action: np.ndarray, new_state: np.ndarray, done: bool, reward: float):
self._internal_data_dict['state_set'][0] = np.concatenate(
(self._internal_data_dict['state_set'][0], np.reshape(state, [1] + self.obs_shape)), axis=0)
self._internal_data_dict['new_state_set'][0] = np.concatenate(
(self._internal_data_dict['new_state_set'][0], np.reshape(new_state, [1] + self.obs_shape)), axis=0)
self._internal_data_dict['reward_set'][0] = np.concatenate(
(self._internal_data_dict['reward_set'][0], np.reshape(reward, [1])), axis=0)
self._internal_data_dict['done_set'][0] = np.concatenate(
(self._internal_data_dict['done_set'][0], np.reshape(np.array(done, dtype=bool), [1])), axis=0)
self._internal_data_dict['action_set'][0] = np.concatenate(
(self._internal_data_dict['action_set'][0], np.reshape(action, [1] + self.action_shape)), axis=0)
self.cumulative_reward += reward
def union(self, sample_data):
assert isinstance(sample_data, type(self))
self.cumulative_reward += sample_data.cumulative_reward
self.step_count_per_episode += sample_data.step_count_per_episode
for key, val in self._internal_data_dict.items():
assert self._internal_data_dict[key][1] == sample_data._internal_data_dict[key][1]
self._internal_data_dict[key][0] = np.concatenate(
(self._internal_data_dict[key][0], sample_data._internal_data_dict[key][0]), axis=0)
def get_copy(self):
obj = TransitionData(env_spec=self.env_spec, obs_shape=self.obs_shape, action_shape=self.action_shape)
for key in self._internal_data_dict:
obj._internal_data_dict[key] = deepcopy(self._internal_data_dict[key])
return obj
def append_new_set(self, name, data_set: (list, np.ndarray), shape: (tuple, list)):
assert len(data_set) == len(self)
assert len(np.array(data_set).shape) - 1 == len(shape)
if len(shape) > 0:
assert np.equal(np.array(data_set).shape[1:], shape).all()
shape = tuple(shape)
self._internal_data_dict[name] = [np.array(data_set), shape]
def sample_batch(self, batch_size, shuffle_flag=True, **kwargs) -> dict:
if shuffle_flag is False:
raise NotImplementedError
total_num = len(self)
id_index = np.random.randint(low=0, high=total_num, size=batch_size)
batch_data = dict()
for key in self._internal_data_dict.keys():
batch_data[key] = self(key)[id_index]
return batch_data
def get_mean_of(self, set_name):
return self.apply_op(set_name=set_name, func=np.mean)
def get_sum_of(self, set_name):
return self.apply_op(set_name=set_name, func=np.sum)
def apply_transformation(self, set_name, func, direct_apply=False, **func_kwargs):
data = make_batch(self._internal_data_dict[set_name][0],
original_shape=self._internal_data_dict[set_name][1])
transformed_data = make_batch(func(data, **func_kwargs),
original_shape=self._internal_data_dict[set_name][1])
if transformed_data.shape != data.shape:
raise TransformationResultedToDifferentShapeError()
elif direct_apply is True:
self._internal_data_dict[set_name][0] = transformed_data
return transformed_data
def apply_op(self, set_name, func, **func_kwargs):
data = make_batch(self._internal_data_dict[set_name][0],
original_shape=self._internal_data_dict[set_name][1])
applied_op_data = np.array(func(data, **func_kwargs))
return applied_op_data
def shuffle(self, index: list = None):
if not index:
index = np.arange(len(self._internal_data_dict['state_set'][0]))
np.random.shuffle(index)
for key in self._internal_data_dict.keys():
self._internal_data_dict[key][0] = self._internal_data_dict[key][0][index]
@property
def _allowed_data_set_keys(self):
return list(self._internal_data_dict.keys())
@property
def state_set(self):
return self('state_set')
@property
def new_state_set(self):
return self('new_state_set')
@property
def action_set(self):
return self('action_set')
@property
def reward_set(self):
return self('reward_set')
@property
def done_set(self):
return self('done_set')
class TrajectoryData(SampleData):
def __init__(self, env_spec=None, obs_shape=None, action_shape=None):
super(TrajectoryData, self).__init__(env_spec=env_spec, obs_shape=obs_shape, action_shape=action_shape)
self.trajectories = []
def reset(self):
self.trajectories = []
def append(self, transition_data: TransitionData):
self.trajectories.append(transition_data)
def union(self, sample_data):
if not isinstance(sample_data, type(self)):
raise TypeError()
self.trajectories += sample_data.trajectories
def return_as_transition_data(self, shuffle_flag=False) -> TransitionData:
transition_set = self.trajectories[0].get_copy()
for i in range(1, len(self.trajectories)):
transition_set.union(self.trajectories[i])
if shuffle_flag is True:
transition_set.shuffle()
return transition_set
def get_mean_of(self, set_name):
tran = self.return_as_transition_data()
return tran.get_mean_of(set_name)
def get_sum_of(self, set_name):
tran = self.return_as_transition_data()
return tran.get_sum_of(set_name)
def __len__(self):
return len(self.trajectories)
def get_copy(self):
tmp_traj = TrajectoryData(env_spec=self.env_spec, obs_shape=self.obs_shape, action_shape=self.action_shape)
for traj in self.trajectories:
tmp_traj.append(transition_data=traj.get_copy())
return tmp_traj
def apply_transformation(self, set_name, func, direct_apply=False, **func_kwargs):
# TODO unit test
for traj in self.trajectories:
traj.apply_transformation(set_name, func, direct_apply, **func_kwargs)
def apply_op(self, set_name, func, **func_kwargs):
# TODO unit test
res = []
for traj in self.trajectories:
res.append(traj.apply_op(set_name, func, **func_kwargs))
return np.array(res)
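if __name__ == '__main__':
    # Usage sketch added for illustration (not part of the original file): collect a few
    # synthetic transitions and draw a random mini-batch from them.
    buffer = TransitionData(obs_shape=(3,), action_shape=(1,))
    for _ in range(5):
        buffer.append(state=np.random.rand(3), action=np.random.rand(1),
                      new_state=np.random.rand(3), done=False, reward=1.0)
    batch = buffer.sample_batch(batch_size=2)
    print(batch['state_set'].shape, batch['reward_set'].shape)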
| 9,553 | 40.359307 | 115 | py |
baconian-project | baconian-project-master/baconian/common/sampler/__init__.py | 0 | 0 | 0 | py |
|
baconian-project | baconian-project-master/baconian/common/spaces/box.py | import numpy as np
from baconian.common.spaces.base import Space
class Box(Space):
"""
A box in R^n.
I.e., each coordinate is bounded.
"""
def __init__(self, low, high, shape=None):
"""
Two kinds of valid input:
Box(-1.0, 1.0, (3,4)) # low and high are scalars, and shape is
provided
Box(np.array([-1.0,-2.0]), np.array([2.0,4.0])) # low and high are
arrays of the same shape
"""
if shape is None:
assert low.shape == high.shape
self.low = low
self.high = high
else:
assert np.isscalar(low) and np.isscalar(high)
self.low = low + np.zeros(shape)
self.high = high + np.zeros(shape)
def sample(self):
return np.random.uniform(
low=self.low, high=self.high,
size=self.low.shape).astype(np.float32)
def contains(self, x):
return bool(x.shape == self.shape and (x >= self.low).all() and (
x <= self.high).all())
@property
def shape(self):
return self.low.shape
@property
def flat_dim(self):
return np.prod(self.low.shape)
@property
def bounds(self):
return self.low, self.high
def flatten(self, x):
return np.asarray(x).flatten()
def unflatten(self, x):
return np.asarray(x).reshape(self.shape)
def flatten_n(self, xs):
xs = np.asarray(xs)
return xs.reshape((xs.shape[0], -1))
def unflatten_n(self, xs):
xs = np.asarray(xs)
return xs.reshape((xs.shape[0],) + self.shape)
def __repr__(self):
return "Box" + str(self.shape)
def __eq__(self, other):
return isinstance(other, Box) \
and np.allclose(self.low, other.low) \
and np.allclose(self.high, other.high)
def __hash__(self):
return hash((tuple(self.low.tolist()), tuple(self.high.tolist())))
def new_tensor_variable(self, name, extra_dims):
raise NotImplementedError
def clip(self, x):
return np.clip(x, self.low, self.high).reshape(self.shape)
def bound(self):
return self.low, self.high
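if __name__ == '__main__':
    # Usage sketch added for illustration (not part of the original file).
    space = Box(low=-1.0, high=1.0, shape=(2, 3))
    point = space.sample()
    print(space.contains(point), space.flat_dim, space.flatten(point).shape)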
| 2,212 | 25.987805 | 78 | py |
baconian-project | baconian-project-master/baconian/common/spaces/base.py | class Space:
"""
    Provides a common classification of state spaces and action spaces,
    so you can write generic code that applies to any Environment,
    e.g. to choose a random action.
"""
def sample(self, seed=0):
"""
        Uniformly sample a random element of this space
"""
raise NotImplementedError
def contains(self, x):
"""
Return boolean specifying if x is a valid
member of this space
"""
raise NotImplementedError
def flatten(self, x):
raise NotImplementedError
def unflatten(self, x):
raise NotImplementedError
def flatten_n(self, xs):
raise NotImplementedError
def unflatten_n(self, xs):
raise NotImplementedError
@property
def flat_dim(self):
"""
The dimension of the flattened vector of the tensor representation
"""
raise NotImplementedError
def new_tensor_variable(self, name, extra_dims):
"""
Create a tensor variable given the name and extra dimensions
prepended
:param name: name of the variable
:param extra_dims: extra dimensions in the front
:return: the created tensor variable
"""
raise NotImplementedError
def clip(self, x):
raise NotImplementedError
def bound(self):
raise NotImplementedError
| 1,396 | 24.4 | 74 | py |
baconian-project | baconian-project-master/baconian/common/spaces/dict.py | """This is a garage-compatible wrapper for Dict spaces."""
from collections import OrderedDict
from baconian.common.spaces.base import Space
from baconian.common.spaces import Box
import numpy as np
import types
class Dict(Space):
"""
A dictionary of simpler spaces, e.g. Discrete, Box.
Example usage:
self.observation_space = spaces.Dict({"position": spaces.Discrete(2),
"velocity": spaces.Discrete(3)})
"""
def __init__(self, spaces):
"""
Convert and store the incoming spaces into an OrderedDict.
Note: classes inheriting from garage.Dict need to convert each
space in spaces to a garage.<class>.space.
"""
if isinstance(spaces, dict):
spaces = OrderedDict(sorted(list(spaces.items())))
if isinstance(spaces, list):
spaces = OrderedDict(spaces)
self.spaces = spaces
def contains(self, x):
"""
Check if x is contained within self.spaces.
Returns:
Boolean
"""
if isinstance(x, dict):
return bool(all(item in self.spaces.items() for item in x.items()))
else:
return False
def to_jsonable(self, sample_n):
"""
Serialize as a dict-representation of vectors.
Returns:
JSON (dict)
"""
return {key: space.to_jsonable([sample[key] for sample in sample_n]) \
for key, space in self.spaces.items()}
def from_jsonable(self, sample_n):
"""
Convert information from a JSON format into a list.
Returns:
ret (list)
"""
dict_of_list = {}
for key, space in self.spaces.items():
dict_of_list[key] = space.from_jsonable(sample_n[key])
ret = []
for i, _ in enumerate(dict_of_list[key]):
entry = {}
for key, value in dict_of_list.items():
entry[key] = value[i]
ret.append(entry)
return ret
@property
def flat_dim(self):
"""
Return a flat dimension of the dict space.
Returns:
flat_dim (int)
"""
raise NotImplementedError
def flatten(self, x):
"""
Return a flattened observation x.
Returns:
x (flattened)
"""
raise NotImplementedError
def unflatten(self, x):
"""
Return an unflattened observation x.
Returns:
x (unflattened)
"""
raise NotImplementedError
def flatten_n(self, xs):
"""
Return flattened observations xs.
Returns:
xs (flattened)
"""
raise NotImplementedError
def unflatten_n(self, xs):
"""
Return unflattened observations xs.
Returns:
xs (unflattened)
"""
raise NotImplementedError
def sample(self):
"""
Return a sample from each space in spaces.
Returns:
OrderedDict
"""
# raise NotImplementedError
# return OrderedDict([(k, space.sample()) for k, space in self.spaces.items()])
ordered = OrderedDict()
for k, space in self.spaces.items():
for a in space.low:
if np.isinf(a):
a = np.nan_to_num(a)
space.sample = types.MethodType(self._sample_with_nan, space)
for b in space.high:
if np.isinf(b):
b = np.nan_to_num(b)
space.sample = types.MethodType(self._sample_with_nan, space)
ordered.update([(k, space.sample())])
return ordered
def new_tensor_variable(self, name, extra_dims):
"""
Return a new tensor variable in the TF graph.
Returns:
Tensor
"""
raise NotImplementedError
@staticmethod
def _sample_with_nan(space: Space):
"""
:param space:
:return:
"""
from gym.spaces.box import Box as GymBox
if not isinstance(space, GymBox):
raise TypeError('space is not of type Box')
high = np.ones_like(space.low)
low = -1 * np.ones_like(space.high)
return np.clip(np.random.uniform(low=low, high=high, size=space.low.shape),
a_min=space.low,
a_max=space.high)
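if __name__ == '__main__':
    # Usage sketch added for illustration (not part of the original file); it assumes the
    # gym package is installed. Sub-spaces with infinite bounds are sampled from [-1, 1]
    # through _sample_with_nan instead of the default gym sampling.
    from gym.spaces import Box as GymBox
    demo_space = Dict({'position': GymBox(low=-np.inf, high=np.inf, shape=(2,)),
                       'velocity': GymBox(low=-1.0, high=1.0, shape=(3,))})
    print(demo_space.sample())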
| 4,479 | 24.168539 | 87 | py |
baconian-project | baconian-project-master/baconian/common/spaces/discrete.py | import numpy as np
from baconian.common.spaces.base import Space
class Discrete(Space):
"""
{0,1,...,n-1}
"""
def __init__(self, n):
self._n = n
@property
def n(self):
return self._n
def sample(self):
return np.random.randint(self.n)
def contains(self, x):
if float(x).is_integer() is False:
return False
x = np.array(x, dtype=np.int)
return bool(x.shape == () and x.dtype.kind == 'i' and x >= 0 and x < self.n)
def __repr__(self):
return "Discrete(%d)" % self.n
def __eq__(self, other):
if not isinstance(other, Discrete):
return False
return self.n == other.n
def flatten(self, x):
import baconian.common.special as special
return special.to_onehot(x, self.n)
def unflatten(self, x):
import baconian.common.special as special
return special.from_onehot(x)
def flatten_n(self, x):
import baconian.common.special as special
return special.to_onehot_n(x, self.n)
def unflatten_n(self, x):
import baconian.common.special as special
return special.from_onehot_n(x)
@property
def flat_dim(self):
return self.n
def weighted_sample(self, weights):
import baconian.common.special as special
return special.weighted_sample(weights, range(self.n))
@property
def default_value(self):
return 0
def __hash__(self):
return hash(self.n)
def new_tensor_variable(self, name, extra_dims):
raise NotImplementedError
def clip(self, x):
x = np.asarray(x).astype(np.int)
assert x.shape == ()
if self.contains(x):
return x
else:
if x < 0:
return 0
elif x >= self.n:
return self.n - 1
def bound(self):
return 0, self.n
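if __name__ == '__main__':
    # Usage sketch added for illustration (not part of the original file).
    space = Discrete(4)
    a = space.sample()
    print(a, space.n, space.flatten(a))  # flatten() returns a one-hot vector of length n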
| 1,928 | 21.430233 | 84 | py |
baconian-project | baconian-project-master/baconian/common/spaces/__init__.py | from baconian.common.spaces.base import Space
from baconian.common.spaces.box import Box
from baconian.common.spaces.dict import Dict
from baconian.common.spaces.discrete import Discrete
from baconian.common.spaces.tuple import Tuple
__all__ = ["Space", "Box", "Dict", "Discrete", "Tuple"]
| 291 | 35.5 | 55 | py |
baconian-project | baconian-project-master/baconian/common/spaces/tuple.py | import numpy as np
from baconian.common.spaces.base import Space
class Tuple(Space):
def __init__(self, *components):
if isinstance(components[0], (list, tuple)):
assert len(components) == 1
components = components[0]
self._components = tuple(components)
dtypes = [
c.new_tensor_variable("tmp", extra_dims=0).dtype
for c in components
]
if dtypes and hasattr(dtypes[0], "as_numpy_dtype"):
dtypes = [d.as_numpy_dtype for d in dtypes]
self._common_dtype = np.core.numerictypes.find_common_type([], dtypes)
def sample(self):
return tuple(x.sample() for x in self._components)
@property
def components(self):
return self._components
def contains(self, x):
return bool(isinstance(x, tuple) and all(
c.contains(xi) for c, xi in zip(self._components, x)))
@property
def flat_dim(self):
return np.sum([c.flat_dim for c in self._components])
def flatten(self, x):
return np.concatenate(
[c.flatten(xi) for c, xi in zip(self._components, x)])
def flatten_n(self, xs):
xs_regrouped = [[x[i] for x in xs] for i in range(len(xs[0]))]
flat_regrouped = [
c.flatten_n(xi) for c, xi in zip(self.components, xs_regrouped)
]
return np.concatenate(flat_regrouped, axis=-1)
def unflatten(self, x):
dims = [c.flat_dim for c in self._components]
flat_xs = np.split(x, np.cumsum(dims)[:-1])
return tuple(
c.unflatten(xi) for c, xi in zip(self._components, flat_xs))
def unflatten_n(self, xs):
dims = [c.flat_dim for c in self._components]
flat_xs = np.split(xs, np.cumsum(dims)[:-1], axis=-1)
unflat_xs = [
c.unflatten_n(xi) for c, xi in zip(self.components, flat_xs)
]
unflat_xs_grouped = list(zip(*unflat_xs))
return unflat_xs_grouped
def __eq__(self, other):
if not isinstance(other, Tuple):
return False
return tuple(self.components) == tuple(other.components)
def __hash__(self):
return hash(tuple(self.components))
def new_tensor_variable(self, name, extra_dims):
raise NotImplementedError
def bound(self):
return (c.bound for c in self._components)
| 2,371 | 31.054054 | 78 | py |
baconian-project | baconian-project-master/baconian/examples/ppo_pendulum.py | # Date: 3/30/19
# Author: Luke
# Project: baconian-internal
"""
A simple example to show how to build up an experiment with ppo training and testing on Pendulum-v0
"""
from baconian.core.core import EnvSpec
from baconian.envs.gym_env import make
from baconian.algo.value_func import MLPVValueFunc
from baconian.algo.ppo import PPO
from baconian.algo.policy.normal_distribution_mlp import NormalDistributionMLPPolicy
from baconian.core.agent import Agent
from baconian.algo.misc import EpsilonGreedy
from baconian.core.experiment import Experiment
from baconian.core.flow.train_test_flow import create_train_test_flow
from baconian.config.global_config import GlobalConfig
from baconian.core.status import get_global_status_collect
from baconian.common.schedules import PeriodicalEventSchedule
def task_fn():
env = make('Pendulum-v0')
name = 'demo_exp_'
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
mlp_v = MLPVValueFunc(env_spec=env_spec,
name_scope=name + 'mlp_v',
name=name + 'mlp_v',
mlp_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 16,
"L1_NORM": 0.01,
"L2_NORM": 0.01,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
])
policy = NormalDistributionMLPPolicy(env_spec=env_spec,
name_scope=name + 'mlp_policy',
name=name + 'mlp_policy',
mlp_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"L1_NORM": 0.01,
"L2_NORM": 0.01,
"N_UNITS": 16,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": env_spec.flat_action_dim,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
],
reuse=False)
ppo = PPO(
env_spec=env_spec,
config_or_config_dict={
"gamma": 0.995,
"lam": 0.98,
"policy_train_iter": 10,
"value_func_train_iter": 10,
"clipping_range": None,
"beta": 1.0,
"eta": 50,
"value_func_memory_size": 10,
"log_var_init": -1.0,
"kl_target": 0.003,
"policy_lr": 0.01,
"value_func_lr": 0.01,
"value_func_train_batch_size": 10,
"lr_multiplier": 1.0
},
value_func=mlp_v,
stochastic_policy=policy,
name=name + 'ppo'
)
agent = Agent(env=env, env_spec=env_spec,
algo=ppo,
algo_saving_scheduler=PeriodicalEventSchedule(
t_fn=lambda: get_global_status_collect()('TOTAL_AGENT_TRAIN_SAMPLE_COUNT'),
trigger_every_step=20,
after_t=10),
name=name + 'agent',
exploration_strategy=EpsilonGreedy(action_space=env_spec.action_space,
init_random_prob=0.5))
flow = create_train_test_flow(
test_every_sample_count=10,
train_every_sample_count=10,
start_test_after_sample_count=5,
start_train_after_sample_count=5,
train_func_and_args=(agent.train, (), dict()),
test_func_and_args=(agent.test, (), dict(sample_count=10)),
sample_func_and_args=(agent.sample, (), dict(sample_count=100,
env=agent.env,
sample_type='trajectory',
store_flag=True))
)
experiment = Experiment(
tuner=None,
env=env,
agent=agent,
flow=flow,
name=name
)
experiment.run()
from baconian.core.experiment_runner import single_exp_runner
GlobalConfig().set('DEFAULT_LOG_PATH', './log_path')
single_exp_runner(task_fn, del_if_log_path_existed=True)
| 5,648 | 40.844444 | 99 | py |
baconian-project | baconian-project-master/baconian/examples/dqn_acrobot_example.py | from baconian.algo.dqn import DQN
from baconian.core.core import EnvSpec
from baconian.envs.gym_env import make
from baconian.algo.value_func.mlp_q_value import MLPQValueFunction
from baconian.core.agent import Agent
from baconian.algo.misc import EpsilonGreedy
from baconian.core.experiment import Experiment
from baconian.core.flow.train_test_flow import create_train_test_flow
from baconian.config.global_config import GlobalConfig
from baconian.common.schedules import LinearScheduler
from baconian.core.status import get_global_status_collect
def task_fn():
env = make('Acrobot-v1')
name = 'demo_exp'
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
mlp_q = MLPQValueFunction(env_spec=env_spec,
name_scope=name + '_mlp_q',
name=name + '_mlp_q',
mlp_config=[
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 64,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "2",
"N_UNITS": 64,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "3",
"N_UNITS": 256,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
])
dqn = DQN(env_spec=env_spec,
config_or_config_dict=dict(REPLAY_BUFFER_SIZE=50000,
GAMMA=0.99,
BATCH_SIZE=32,
LEARNING_RATE=0.001,
TRAIN_ITERATION=1,
DECAY=0),
name=name + '_dqn',
value_func=mlp_q)
agent = Agent(env=env, env_spec=env_spec,
algo=dqn,
name=name + '_agent',
exploration_strategy=EpsilonGreedy(action_space=env_spec.action_space,
prob_scheduler=LinearScheduler(
t_fn=lambda: get_global_status_collect()(
'TOTAL_AGENT_TRAIN_SAMPLE_COUNT'),
schedule_timesteps=int(0.1 * 100000),
initial_p=1.0,
final_p=0.02),
init_random_prob=0.1),
noise_adder=None)
flow = create_train_test_flow(
test_every_sample_count=100,
train_every_sample_count=1,
start_test_after_sample_count=0,
start_train_after_sample_count=1000,
sample_func_and_args=(agent.sample, (), dict(sample_count=1,
env=agent.env,
store_flag=True)),
train_func_and_args=(agent.train, (), dict()),
test_func_and_args=(agent.test, (), dict(sample_count=3)),
)
experiment = Experiment(
tuner=None,
env=env,
agent=agent,
flow=flow,
name=name
)
experiment.run()
from baconian.core.experiment_runner import *
GlobalConfig().set('DEFAULT_LOG_PATH', './log_path')
single_exp_runner(task_fn, del_if_log_path_existed=True)
| 4,715 | 44.786408 | 98 | py |
baconian-project | baconian-project-master/baconian/examples/early_stopping_flow.py | """
This script shows an example of adding an early stopping feature: when the agent can no longer increase its average
evaluation reward, the experiment will end early.
To do so in an extensible and modular way, we implement a new flow called EarlyStoppingFlow that adds a special
ending-condition check by accessing the agent's evaluation reward (through the agent's built-in recorder). Such a
mechanism can be re-used by all algorithms, which avoids redundant coding for users.
"""
import numpy as np
from baconian.config.dict_config import DictConfig
from baconian.core.flow.train_test_flow import TrainTestFlow
from baconian.core.core import EnvSpec
from baconian.envs.gym_env import make
from baconian.algo.value_func.mlp_q_value import MLPQValueFunction
from baconian.algo.ddpg import DDPG
from baconian.algo.policy import DeterministicMLPPolicy
from baconian.core.agent import Agent
from baconian.algo.misc import EpsilonGreedy
from baconian.core.experiment import Experiment
from baconian.core.status import get_global_status_collect
from baconian.common.schedules import PeriodicalEventSchedule
class EarlyStoppingFlow(TrainTestFlow):
required_key_dict = {
**TrainTestFlow.required_key_dict,
'USE_LAST_K_EVALUATION_REWARD': 10
}
def __init__(self, train_sample_count_func, config_or_config_dict: (DictConfig, dict), func_dict: dict, agent):
super().__init__(train_sample_count_func, config_or_config_dict, func_dict)
self.agent = agent
def _is_ended(self):
test_reward = sorted(self.agent.recorder.get_log(attr_name='sum_reward', filter_by_status=dict(status='TEST')),
key=lambda x: x['sample_counter'])
if len(test_reward) >= self.parameters('USE_LAST_K_EVALUATION_REWARD') * 2:
last_reward = test_reward[-self.parameters('USE_LAST_K_EVALUATION_REWARD'):]
pre_reward = test_reward[-self.parameters('USE_LAST_K_EVALUATION_REWARD') * 2: -self.parameters(
'USE_LAST_K_EVALUATION_REWARD')]
last_reward = np.mean([r['value'] for r in last_reward])
pre_reward = np.mean([r['value'] for r in pre_reward])
if last_reward < pre_reward:
ConsoleLogger().print('info', 'training ended because last {} step reward: {} < previous {} step reward {}'.format(self.parameters('USE_LAST_K_EVALUATION_REWARD'), last_reward, self.parameters('USE_LAST_K_EVALUATION_REWARD'), pre_reward))
return True
return super()._is_ended()
def create_early_stopping_flow(test_every_sample_count, train_every_sample_count, start_train_after_sample_count,
start_test_after_sample_count, train_func_and_args, test_func_and_args,
sample_func_and_args,
agent,
use_last_k_evaluation_reward,
train_samples_counter_func=None):
config_dict = dict(
TEST_EVERY_SAMPLE_COUNT=test_every_sample_count,
TRAIN_EVERY_SAMPLE_COUNT=train_every_sample_count,
START_TRAIN_AFTER_SAMPLE_COUNT=start_train_after_sample_count,
START_TEST_AFTER_SAMPLE_COUNT=start_test_after_sample_count,
USE_LAST_K_EVALUATION_REWARD=use_last_k_evaluation_reward
)
def return_func_dict(s_dict):
return dict(func=s_dict[0],
args=s_dict[1],
kwargs=s_dict[2])
func_dict = dict(
train=return_func_dict(train_func_and_args),
test=return_func_dict(test_func_and_args),
sample=return_func_dict(sample_func_and_args),
)
if train_samples_counter_func is None:
def default_train_samples_counter_func():
return get_global_status_collect()('TOTAL_AGENT_TRAIN_SAMPLE_COUNT')
train_samples_counter_func = default_train_samples_counter_func
return EarlyStoppingFlow(config_or_config_dict=config_dict,
train_sample_count_func=train_samples_counter_func,
agent=agent,
func_dict=func_dict)
def task_fn():
env = make('Pendulum-v0')
name = 'demo_exp'
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
mlp_q = MLPQValueFunction(env_spec=env_spec,
name_scope=name + '_mlp_q',
name=name + '_mlp_q',
mlp_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 16,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
])
policy = DeterministicMLPPolicy(env_spec=env_spec,
name_scope=name + '_mlp_policy',
name=name + '_mlp_policy',
mlp_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 16,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": env_spec.flat_action_dim,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
],
reuse=False)
ddpg = DDPG(
env_spec=env_spec,
config_or_config_dict={
"REPLAY_BUFFER_SIZE": 10000,
"GAMMA": 0.999,
"CRITIC_LEARNING_RATE": 0.001,
"ACTOR_LEARNING_RATE": 0.001,
"DECAY": 0.5,
"BATCH_SIZE": 50,
"TRAIN_ITERATION": 1,
"critic_clip_norm": 0.1,
"actor_clip_norm": 0.1,
},
value_func=mlp_q,
policy=policy,
name=name + '_ddpg',
replay_buffer=None
)
agent = Agent(env=env, env_spec=env_spec,
algo=ddpg,
algo_saving_scheduler=PeriodicalEventSchedule(
t_fn=lambda: get_global_status_collect()('TOTAL_AGENT_TRAIN_SAMPLE_COUNT'),
trigger_every_step=20,
after_t=10),
name=name + '_agent',
exploration_strategy=EpsilonGreedy(action_space=env_spec.action_space,
init_random_prob=0.5))
flow = create_early_stopping_flow(
agent=agent,
use_last_k_evaluation_reward=5,
test_every_sample_count=10,
train_every_sample_count=10,
start_test_after_sample_count=5,
start_train_after_sample_count=5,
train_func_and_args=(agent.train, (), dict()),
test_func_and_args=(agent.test, (), dict(sample_count=1)),
sample_func_and_args=(agent.sample, (), dict(sample_count=100,
env=agent.env))
)
experiment = Experiment(
tuner=None,
env=env,
agent=agent,
flow=flow,
name=name
)
experiment.run()
from baconian.core.experiment_runner import *
GlobalConfig().set('DEFAULT_LOG_PATH', './log_path')
GlobalConfig().set('DEFAULT_EXPERIMENT_END_POINT',
dict(TOTAL_AGENT_TRAIN_SAMPLE_COUNT=2000,
TOTAL_AGENT_TEST_SAMPLE_COUNT=None,
TOTAL_AGENT_UPDATE_COUNT=None))
single_exp_runner(task_fn, del_if_log_path_existed=True)
| 8,729 | 44 | 254 | py |
baconian-project | baconian-project-master/baconian/examples/gp_dynamics.py | """
This gives a simple example of how to use a Gaussian Process (GP) to approximate the Gym environment Pendulum-v0.
We use the gpflow package to build the Gaussian Process.
"""
from baconian.core.core import EnvSpec
from baconian.envs.gym_env import make
import numpy as np
from baconian.common.sampler.sample_data import TransitionData
from baconian.algo.policy import UniformRandomPolicy
from baconian.algo.dynamics.gaussian_process_dynamiocs_model import GaussianProcessDyanmicsModel
from baconian.algo.dynamics.dynamics_model import DynamicsEnvWrapper
from baconian.algo.dynamics.terminal_func.terminal_func import RandomTerminalFunc
from baconian.algo.dynamics.reward_func.reward_func import RandomRewardFunc
env = make('Pendulum-v0')
name = 'demo_exp'
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
data = TransitionData(env_spec=env_spec)
policy = UniformRandomPolicy(env_spec=env_spec)
# Do some initial sampling here to train GP model
st = env.reset()
for i in range(100):
ac = policy.forward(st)
new_st, re, _, _ = env.step(ac)
data.append(state=st, new_state=new_st, action=ac, reward=re, done=False)
st = new_st
gp = GaussianProcessDyanmicsModel(env_spec=env_spec, batch_data=data)
gp.init()
gp.train()
dyna_env = DynamicsEnvWrapper(dynamics=gp)
# Since we only care about the prediction here, we set the terminal function and reward function
# to random placeholders
dyna_env.set_terminal_reward_func(terminal_func=RandomTerminalFunc(),
reward_func=RandomRewardFunc())
st = env.reset()
real_state_list = []
dynamics_state_list = []
test_sample_count = 100
for i in range(test_sample_count):
ac = env_spec.action_space.sample()
gp.reset_state(state=st)
new_state_dynamics, _, _, _ = dyna_env.step(action=ac, allow_clip=True)
new_state_real, _, done, _ = env.step(action=ac)
real_state_list.append(new_state_real)
dynamics_state_list.append(new_state_dynamics)
st = new_state_real
if done is True:
env.reset()
l1_loss = np.linalg.norm(np.array(real_state_list) - np.array(dynamics_state_list), ord=1)
l2_loss = np.linalg.norm(np.array(real_state_list) - np.array(dynamics_state_list), ord=2)
print('l1 loss is {}, l2 loss is {}'.format(l1_loss, l2_loss))
| 2,315 | 40.357143 | 113 | py |
baconian-project | baconian-project-master/baconian/examples/dyna.py | """
A simple example to show how to build up an experiment with Dyna training and testing on Pendulum-v0
"""
from baconian.core.core import EnvSpec
from baconian.envs.gym_env import make
from baconian.algo.value_func.mlp_q_value import MLPQValueFunction
from baconian.algo.ddpg import DDPG
from baconian.algo.policy import DeterministicMLPPolicy
from baconian.core.agent import Agent
from baconian.algo.misc import EpsilonGreedy
from baconian.core.experiment import Experiment
from baconian.config.global_config import GlobalConfig
from baconian.core.status import get_global_status_collect
from baconian.common.schedules import PeriodicalEventSchedule
from baconian.algo.dynamics.mlp_dynamics_model import ContinuousMLPGlobalDynamicsModel
from baconian.algo.dyna import Dyna
from baconian.algo.dynamics.reward_func.reward_func import RandomRewardFunc
from baconian.algo.dynamics.terminal_func.terminal_func import FixedEpisodeLengthTerminalFunc
from baconian.core.flow.dyna_flow import create_dyna_flow
from baconian.common.data_pre_processing import RunningStandardScaler
def task_fn():
# create the gym environment by make function
env = make('Pendulum-v0')
# give your experiment a name which is used to generate the log path etc.
name = 'demo_exp'
# construct the environment specification
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
# construct the neural network to approximate q function of DDPG
mlp_q = MLPQValueFunction(env_spec=env_spec,
name_scope=name + '_mlp_q',
name=name + '_mlp_q',
mlp_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 16,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
])
# construct the neural network to approximate policy for DDPG
policy = DeterministicMLPPolicy(env_spec=env_spec,
name_scope=name + '_mlp_policy',
name=name + '_mlp_policy',
mlp_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 16,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": env_spec.flat_action_dim,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
],
reuse=False)
    # construct the DDPG algorithm
ddpg = DDPG(
env_spec=env_spec,
config_or_config_dict={
"REPLAY_BUFFER_SIZE": 10000,
"GAMMA": 0.999,
"CRITIC_LEARNING_RATE": 0.001,
"ACTOR_LEARNING_RATE": 0.001,
"DECAY": 0.5,
"BATCH_SIZE": 50,
"TRAIN_ITERATION": 1,
"critic_clip_norm": 0.1,
"actor_clip_norm": 0.1,
},
value_func=mlp_q,
policy=policy,
name=name + '_ddpg',
replay_buffer=None
)
    # construct a neural-network-based global dynamics model to approximate the state transitions of the environment
mlp_dyna = ContinuousMLPGlobalDynamicsModel(
env_spec=env_spec,
name_scope=name + '_mlp_dyna',
name=name + '_mlp_dyna',
learning_rate=0.01,
state_input_scaler=RunningStandardScaler(dims=env_spec.flat_obs_dim),
action_input_scaler=RunningStandardScaler(dims=env_spec.flat_action_dim),
output_delta_state_scaler=RunningStandardScaler(dims=env_spec.flat_obs_dim),
mlp_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"L1_NORM": 0.0,
"L2_NORM": 0.0,
"N_UNITS": 16,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"L1_NORM": 0.0,
"L2_NORM": 0.0,
"N_UNITS": env_spec.flat_obs_dim,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
])
    # finally, construct the Dyna algorithm with a model-free algorithm (DDPG) and a NN dynamics model.
algo = Dyna(env_spec=env_spec,
name=name + '_dyna_algo',
model_free_algo=ddpg,
dynamics_model=mlp_dyna,
config_or_config_dict=dict(
dynamics_model_train_iter=10,
model_free_algo_train_iter=10
))
    # To make the NN-based dynamics model a proper environment that can serve as a sampling source for DDPG,
    # a reward function and a terminal function need to be set.
    # For example purposes only, we use a random reward function and a terminal function with fixed episode length.
algo.set_terminal_reward_function_for_dynamics_env(
terminal_func=FixedEpisodeLengthTerminalFunc(max_step_length=env.unwrapped._max_episode_steps,
step_count_fn=algo.dynamics_env.total_step_count_fn),
reward_func=RandomRewardFunc())
# construct agent with additional exploration strategy if needed.
agent = Agent(env=env, env_spec=env_spec,
algo=algo,
algo_saving_scheduler=PeriodicalEventSchedule(
t_fn=lambda: get_global_status_collect()('TOTAL_AGENT_TRAIN_SAMPLE_COUNT'),
trigger_every_step=20,
after_t=10),
name=name + '_agent',
exploration_strategy=EpsilonGreedy(action_space=env_spec.action_space,
init_random_prob=0.5))
    # construct the training flow, called Dyna flow. It defines how the training proceeds and the terminal condition
flow = create_dyna_flow(
train_algo_func=(agent.train, (), dict(state='state_agent_training')),
train_algo_from_synthesized_data_func=(agent.train, (), dict(state='state_agent_training')),
train_dynamics_func=(agent.train, (), dict(state='state_dynamics_training')),
test_algo_func=(agent.test, (), dict(sample_count=1)),
test_dynamics_func=(agent.algo.test_dynamics, (), dict(sample_count=10, env=env)),
sample_from_real_env_func=(agent.sample, (), dict(sample_count=10,
env=agent.env,
store_flag=True)),
sample_from_dynamics_env_func=(agent.sample, (), dict(sample_count=10,
env=agent.algo.dynamics_env,
store_flag=True)),
train_algo_every_real_sample_count_by_data_from_real_env=40,
train_algo_every_real_sample_count_by_data_from_dynamics_env=40,
test_algo_every_real_sample_count=40,
test_dynamics_every_real_sample_count=40,
train_dynamics_ever_real_sample_count=20,
start_train_algo_after_sample_count=1,
start_train_dynamics_after_sample_count=1,
start_test_algo_after_sample_count=1,
start_test_dynamics_after_sample_count=1,
warm_up_dynamics_samples=1
)
# construct the experiment
experiment = Experiment(
tuner=None,
env=env,
agent=agent,
flow=flow,
name=name + '_exp'
)
# run!
experiment.run()
from baconian.core.experiment_runner import *
# set some global configuration here
# set DEFAULT_EXPERIMENT_END_POINT to indicate when to stop the experiment.
# a commonly used key is TOTAL_AGENT_TRAIN_SAMPLE_COUNT, i.e., how many samples/timesteps are used for training
GlobalConfig().set('DEFAULT_EXPERIMENT_END_POINT', dict(TOTAL_AGENT_TRAIN_SAMPLE_COUNT=200))
# set the logging path where logs and model checkpoints will be written.
GlobalConfig().set('DEFAULT_LOG_PATH', './log_path')
# feed the task into an experiment runner.
single_exp_runner(task_fn, del_if_log_path_existed=True)
| 9,525 | 46.158416 | 116 | py |
baconian-project | baconian-project-master/baconian/examples/ddpg_pendulum.py | """
A simple example to show how to build up an experiment with DDPG training and testing on Pendulum-v0.
"""
from baconian.core.core import EnvSpec
from baconian.envs.gym_env import make
from baconian.algo.value_func.mlp_q_value import MLPQValueFunction
from baconian.algo.ddpg import DDPG
from baconian.algo.policy import DeterministicMLPPolicy
from baconian.core.agent import Agent
from baconian.algo.misc import EpsilonGreedy
from baconian.core.experiment import Experiment
from baconian.core.flow.train_test_flow import create_train_test_flow
from baconian.config.global_config import GlobalConfig
from baconian.core.status import get_global_status_collect
from baconian.common.schedules import PeriodicalEventSchedule
import baconian.common.log_data_loader as loader
from pathlib import Path
def task_fn():
env = make('Pendulum-v0')
name = 'demo_exp'
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
mlp_q = MLPQValueFunction(env_spec=env_spec,
name_scope=name + '_mlp_q',
name=name + '_mlp_q',
mlp_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 16,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
])
policy = DeterministicMLPPolicy(env_spec=env_spec,
name_scope=name + '_mlp_policy',
name=name + '_mlp_policy',
mlp_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 16,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": env_spec.flat_action_dim,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
],
reuse=False)
ddpg = DDPG(
env_spec=env_spec,
config_or_config_dict={
"REPLAY_BUFFER_SIZE": 10000,
"GAMMA": 0.999,
"CRITIC_LEARNING_RATE": 0.001,
"ACTOR_LEARNING_RATE": 0.001,
"DECAY": 0.5,
"BATCH_SIZE": 50,
"TRAIN_ITERATION": 1,
"critic_clip_norm": 0.1,
"actor_clip_norm": 0.1,
},
value_func=mlp_q,
policy=policy,
name=name + '_ddpg',
replay_buffer=None
)
agent = Agent(env=env, env_spec=env_spec,
algo=ddpg,
algo_saving_scheduler=PeriodicalEventSchedule(
t_fn=lambda: get_global_status_collect()('TOTAL_AGENT_TRAIN_SAMPLE_COUNT'),
trigger_every_step=20,
after_t=10),
name=name + '_agent',
exploration_strategy=EpsilonGreedy(action_space=env_spec.action_space,
init_random_prob=0.5))
flow = create_train_test_flow(
test_every_sample_count=10,
train_every_sample_count=10,
start_test_after_sample_count=5,
start_train_after_sample_count=5,
train_func_and_args=(agent.train, (), dict()),
test_func_and_args=(agent.test, (), dict(sample_count=1)),
sample_func_and_args=(agent.sample, (), dict(sample_count=100,
env=agent.env,
store_flag=True))
)
experiment = Experiment(
tuner=None,
env=env,
agent=agent,
flow=flow,
name=name
)
experiment.run()
from baconian.core.experiment_runner import *
GlobalConfig().set('DEFAULT_LOG_PATH', './log_path')
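# (Optional addition, not in the original example) cap when the experiment ends; the same
# end-point key is used by the Dyna and model-ensemble examples in this repository.
GlobalConfig().set('DEFAULT_EXPERIMENT_END_POINT', dict(TOTAL_AGENT_TRAIN_SAMPLE_COUNT=200))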
single_exp_runner(task_fn, del_if_log_path_existed=True)
| 5,169 | 40.693548 | 100 | py |
baconian-project | baconian-project-master/baconian/examples/scheduler_parameter_dqn.py | """
In this example, we demonstrate how to utilize the scheduler module to dynamically set the
learning rate of your algorithm, or the epsilon-greedy probability.
"""
from baconian.algo.dqn import DQN
from baconian.core.core import EnvSpec
from baconian.envs.gym_env import make
from baconian.algo.value_func.mlp_q_value import MLPQValueFunction
from baconian.core.agent import Agent
from baconian.algo.misc import EpsilonGreedy
from baconian.core.experiment import Experiment
from baconian.core.flow.train_test_flow import create_train_test_flow
from baconian.config.global_config import GlobalConfig
from baconian.common.schedules import LinearScheduler, PiecewiseScheduler, PeriodicalEventSchedule
from baconian.core.status import get_global_status_collect
def task_fn():
env = make('Acrobot-v1')
name = 'example_scheduler_'
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
mlp_q = MLPQValueFunction(env_spec=env_spec,
name_scope=name + '_mlp_q',
name=name + '_mlp_q',
mlp_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 16,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
])
dqn = DQN(env_spec=env_spec,
config_or_config_dict=dict(REPLAY_BUFFER_SIZE=1000,
GAMMA=0.99,
BATCH_SIZE=10,
LEARNING_RATE=0.001,
TRAIN_ITERATION=1,
DECAY=0.5),
name=name + '_dqn',
value_func=mlp_q)
agent = Agent(env=env, env_spec=env_spec,
algo=dqn,
name=name + '_agent',
algo_saving_scheduler=PeriodicalEventSchedule(
t_fn=lambda: get_global_status_collect()('TOTAL_AGENT_TRAIN_SAMPLE_COUNT'),
trigger_every_step=20,
after_t=10),
exploration_strategy=EpsilonGreedy(action_space=env_spec.action_space,
prob_scheduler=PiecewiseScheduler(
t_fn=lambda: get_global_status_collect()(
'TOTAL_AGENT_TRAIN_SAMPLE_COUNT'),
endpoints=((10, 0.3), (100, 0.1), (200, 0.0)),
outside_value=0.0
),
init_random_prob=0.5))
flow = create_train_test_flow(
test_every_sample_count=10,
train_every_sample_count=10,
start_test_after_sample_count=5,
start_train_after_sample_count=5,
train_func_and_args=(agent.train, (), dict()),
test_func_and_args=(agent.test, (), dict(sample_count=10)),
sample_func_and_args=(agent.sample, (), dict(sample_count=100,
env=agent.env,
store_flag=True))
)
experiment = Experiment(
tuner=None,
env=env,
agent=agent,
flow=flow,
name=name + 'experiment_debug'
)
dqn.parameters.set_scheduler(param_key='LEARNING_RATE',
scheduler=LinearScheduler(
t_fn=experiment.TOTAL_AGENT_TRAIN_SAMPLE_COUNT,
schedule_timesteps=GlobalConfig().DEFAULT_EXPERIMENT_END_POINT[
'TOTAL_AGENT_TRAIN_SAMPLE_COUNT'],
final_p=0.0001,
initial_p=0.01))
experiment.run()
from baconian.core.experiment_runner import single_exp_runner
GlobalConfig().set('DEFAULT_LOG_PATH', './log_path')
single_exp_runner(task_fn, del_if_log_path_existed=True)
| 4,857 | 46.165049 | 103 | py |
baconian-project | baconian-project-master/baconian/examples/__init__.py | 0 | 0 | 0 | py |
|
baconian-project | baconian-project-master/baconian/examples/env_wrapper.py | """
A simple example to show how to wrap an environment's observation space, action space and reward function for
reshaping to boost the agent's training.
Gym already supports this feature, but since we develop a new environment class based on gym's env,
a short tutorial is given to better introduce its usage.
"""
from baconian.envs.env_wrapper import ObservationWrapper, ActionWrapper, RewardWrapper
from baconian.envs.gym_env import make
class SmoothMountainCarReward(RewardWrapper):
def _reward(self, observation, action, reward, done, info):
return 10.0
car_env = make('MountainCarContinuous-v0')
car_env = SmoothMountainCarReward(env=car_env)
car_env.reset()
new_st, reward, terminal, info = car_env.step(action=car_env.action_space.sample())
print(reward)
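# The docstring above also mentions observation and action wrapping; a minimal sketch follows.
# NOTE (sketch, not part of the original example): this assumes ObservationWrapper exposes an
# `_observation` hook analogous to the `_reward` hook used by RewardWrapper above -- check
# baconian.envs.env_wrapper for the exact method name before relying on it.
class PassThroughObservation(ObservationWrapper):
    def _observation(self, observation):
        # reshape, clip or normalise the raw observation here; this sketch returns it unchanged
        return observation
# e.g. wrap the same env: car_env = PassThroughObservation(env=car_env)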
| 804 | 34 | 115 | py |
baconian-project | baconian-project-master/baconian/examples/test_all_example.py | import glob
import os
import importlib.util
ABS_PATH = os.path.dirname(os.path.realpath(__file__))
def test_all():
file_list = glob.glob(os.path.join(ABS_PATH, '*.py'))
file_list.remove(os.path.realpath(__file__))
for f in file_list:
spec = importlib.util.spec_from_file_location('', f)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
if __name__ == '__main__':
test_all()
| 449 | 22.684211 | 60 | py |
baconian-project | baconian-project-master/baconian/examples/model_ensemble_ddpg.py | """
An example showing the Model-ensemble method in
Kurutach, Thanard, et al. "Model-ensemble trust-region policy optimization." arXiv preprint arXiv:1802.10592 (2018).
Here we use Model-ensemble with DDPG instead of TRPO on Pendulum-v0, and also reuse the Dyna flow to show the flexibility of Baconian modules.
"""
from baconian.core.core import EnvSpec
from baconian.envs.gym_env import make
from baconian.algo.value_func.mlp_q_value import MLPQValueFunction
from baconian.algo.ddpg import DDPG
from baconian.algo.policy import DeterministicMLPPolicy
from baconian.core.agent import Agent
from baconian.algo.misc import EpsilonGreedy
from baconian.core.experiment import Experiment
from baconian.config.global_config import GlobalConfig
from baconian.core.status import get_global_status_collect
from baconian.common.schedules import PeriodicalEventSchedule
from baconian.algo.dynamics.mlp_dynamics_model import ContinuousMLPGlobalDynamicsModel
from baconian.algo.model_ensemble import ModelEnsembleAlgo
from baconian.envs.envs_reward_func import PendulumRewardFunc
from baconian.algo.dynamics.terminal_func.terminal_func import FixedEpisodeLengthTerminalFunc
from baconian.core.flow.dyna_flow import create_dyna_flow
from baconian.common.data_pre_processing import RunningStandardScaler
from baconian.core.ensemble import ModelEnsemble
def task_fn():
env = make('Pendulum-v0')
name = 'demo_exp'
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
mlp_q = MLPQValueFunction(env_spec=env_spec,
name_scope=name + '_mlp_q',
name=name + '_mlp_q',
mlp_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 16,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
])
policy = DeterministicMLPPolicy(env_spec=env_spec,
name_scope=name + '_mlp_policy',
name=name + '_mlp_policy',
mlp_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 16,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": env_spec.flat_action_dim,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
],
reuse=False)
ddpg = DDPG(
env_spec=env_spec,
config_or_config_dict={
"REPLAY_BUFFER_SIZE": 10000,
"GAMMA": 0.999,
"CRITIC_LEARNING_RATE": 0.001,
"ACTOR_LEARNING_RATE": 0.001,
"DECAY": 0.5,
"BATCH_SIZE": 50,
"TRAIN_ITERATION": 1,
"critic_clip_norm": 0.1,
"actor_clip_norm": 0.1,
},
value_func=mlp_q,
policy=policy,
name=name + '_ddpg',
replay_buffer=None
)
mlp_dyna_list = []
for i in range(10):
mlp_dyna = ContinuousMLPGlobalDynamicsModel(
env_spec=env_spec,
name_scope=name + '_mlp_dyna_{}'.format(i),
name=name + '_mlp_dyna_{}'.format(i),
learning_rate=0.01,
state_input_scaler=RunningStandardScaler(dims=env_spec.flat_obs_dim),
action_input_scaler=RunningStandardScaler(dims=env_spec.flat_action_dim),
output_delta_state_scaler=RunningStandardScaler(dims=env_spec.flat_obs_dim),
mlp_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"L1_NORM": 0.0,
"L2_NORM": 0.0,
"N_UNITS": 16,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"L1_NORM": 0.0,
"L2_NORM": 0.0,
"N_UNITS": env_spec.flat_obs_dim,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
])
mlp_dyna_list.append(mlp_dyna)
dyna_ensemble_model = ModelEnsemble(n_models=10, model=mlp_dyna_list, prediction_type='random', env_spec=env_spec)
algo = ModelEnsembleAlgo(
env_spec=env_spec,
model_free_algo=ddpg,
dynamics_model=dyna_ensemble_model,
config_or_config_dict=dict(
dynamics_model_train_iter=10,
model_free_algo_train_iter=10,
validation_trajectory_count=2,
)
)
    # For this example, we use the Pendulum reward function and a terminal function with a fixed episode length.
algo.set_terminal_reward_function_for_dynamics_env(
terminal_func=FixedEpisodeLengthTerminalFunc(max_step_length=env.unwrapped._max_episode_steps,
step_count_fn=algo.dynamics_env.total_step_count_fn),
reward_func=PendulumRewardFunc())
agent = Agent(env=env, env_spec=env_spec,
algo=algo,
algo_saving_scheduler=PeriodicalEventSchedule(
t_fn=lambda: get_global_status_collect()('TOTAL_AGENT_TRAIN_SAMPLE_COUNT'),
trigger_every_step=200,
after_t=10),
name=name + '_agent',
exploration_strategy=EpsilonGreedy(action_space=env_spec.action_space,
init_random_prob=0.5))
# we can easily reuse the dyna training flow to implement the Model-ensemble training flow.
flow = create_dyna_flow(
train_algo_func=(agent.train, (), dict(state='state_agent_training')),
train_algo_from_synthesized_data_func=(agent.train, (), dict(state='state_agent_training')),
train_dynamics_func=(agent.train, (), dict(state='state_dynamics_training')),
test_algo_func=(agent.test, (), dict(sample_count=10)),
test_dynamics_func=(agent.algo.test_dynamics, (), dict(sample_count=10, env=env)),
sample_from_real_env_func=(agent.sample, (), dict(sample_count=10,
env=agent.env,
store_flag=True)),
sample_from_dynamics_env_func=(agent.sample, (), dict(sample_count=10,
env=agent.algo.dynamics_env,
store_flag=True)),
        # set these intervals large enough so the agent only uses data from the dynamics env.
train_algo_every_real_sample_count_by_data_from_real_env=100,
train_algo_every_real_sample_count_by_data_from_dynamics_env=100,
test_algo_every_real_sample_count=100,
test_dynamics_every_real_sample_count=100,
train_dynamics_ever_real_sample_count=100,
start_train_algo_after_sample_count=1,
start_train_dynamics_after_sample_count=1,
start_test_algo_after_sample_count=1,
start_test_dynamics_after_sample_count=1,
warm_up_dynamics_samples=100
)
experiment = Experiment(
tuner=None,
env=env,
agent=agent,
flow=flow,
name=name + '_exp'
)
experiment.run()
from baconian.core.experiment_runner import *
GlobalConfig().set('DEFAULT_LOG_PATH', './log_path')
GlobalConfig().set('DEFAULT_EXPERIMENT_END_POINT', dict(TOTAL_AGENT_TRAIN_SAMPLE_COUNT=400))
single_exp_runner(task_fn, del_if_log_path_existed=True)
| 9,054 | 44.732323 | 138 | py |
baconian-project | baconian-project-master/baconian/examples/mpc.py | """
A simple example to show how to build up an experiment with MPC (model predictive control) training and testing on Pendulum-v0.
"""
from baconian.core.core import EnvSpec
from baconian.envs.gym_env import make
from baconian.core.agent import Agent
from baconian.algo.misc import EpsilonGreedy
from baconian.core.experiment import Experiment
from baconian.core.flow.train_test_flow import create_train_test_flow
from baconian.algo.mpc import ModelPredictiveControl
from baconian.algo.dynamics.terminal_func.terminal_func import RandomTerminalFunc
from baconian.algo.dynamics.reward_func.reward_func import RandomRewardFunc
from baconian.algo.policy import UniformRandomPolicy
from baconian.algo.dynamics.mlp_dynamics_model import ContinuousMLPGlobalDynamicsModel
from baconian.config.global_config import GlobalConfig
def task_fn():
env = make('Pendulum-v0')
name = 'demo_exp'
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
mlp_dyna = ContinuousMLPGlobalDynamicsModel(
env_spec=env_spec,
name_scope=name + '_mlp_dyna',
name=name + '_mlp_dyna',
learning_rate=0.01,
mlp_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"L1_NORM": 0.0,
"L2_NORM": 0.0,
"N_UNITS": 16,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"L1_NORM": 0.0,
"L2_NORM": 0.0,
"N_UNITS": env_spec.flat_obs_dim,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
])
algo = ModelPredictiveControl(
dynamics_model=mlp_dyna,
env_spec=env_spec,
config_or_config_dict=dict(
SAMPLED_HORIZON=2,
SAMPLED_PATH_NUM=5,
dynamics_model_train_iter=10
),
name=name + '_mpc',
policy=UniformRandomPolicy(env_spec=env_spec, name='uni_policy')
)
algo.set_terminal_reward_function_for_dynamics_env(reward_func=RandomRewardFunc(name='reward_func'),
terminal_func=RandomTerminalFunc(name='random_terminal'), )
agent = Agent(env=env, env_spec=env_spec,
algo=algo,
name=name + '_agent',
exploration_strategy=EpsilonGreedy(action_space=env_spec.action_space,
init_random_prob=0.5))
flow = create_train_test_flow(
test_every_sample_count=10,
train_every_sample_count=10,
start_test_after_sample_count=5,
start_train_after_sample_count=5,
train_func_and_args=(agent.train, (), dict()),
test_func_and_args=(agent.test, (), dict(sample_count=10)),
sample_func_and_args=(agent.sample, (), dict(sample_count=100,
env=agent.env,
store_flag=True))
)
experiment = Experiment(
tuner=None,
env=env,
agent=agent,
flow=flow,
name=name
)
experiment.run()
from baconian.core.experiment_runner import single_exp_runner
GlobalConfig().set('DEFAULT_LOG_PATH', './log_path')
single_exp_runner(task_fn, del_if_log_path_existed=True)
| 3,518 | 36.042105 | 114 | py |
baconian-project | baconian-project-master/baconian/test/run_all_tests.py | from unittest import TestLoader, TextTestRunner, TestSuite
import sys
import os
path = os.path.dirname(os.path.realpath(__file__))
# sys.path.append(path)
# print('join {} into environ path'.format(path))
src_dir = os.path.abspath(os.path.join(path, os.pardir, os.pardir))
sys.path.append(src_dir)
print('join {} into environ path'.format(src_dir))
def test_all(dir=''):
loader = TestLoader()
suite = TestSuite()
for all_test_suite in loader.discover(start_dir=os.path.join(path, 'tests', dir), pattern='test*.py'):
for test_case in all_test_suite:
suite.addTest(test_case)
TextTestRunner().run(test=suite)
if __name__ == '__main__':
test_all()
| 690 | 27.791667 | 106 | py |
baconian-project | baconian-project-master/baconian/test/__init__.py | import os
import sys
CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
sys.path.append(CURRENT_PATH)
PAR_PATH = os.path.abspath(os.path.join(CURRENT_PATH, os.pardir))
sys.path.append(PAR_PATH)
| 203 | 24.5 | 65 | py |
baconian-project | baconian-project-master/baconian/test/tests/__init__.py | 0 | 0 | 0 | py |
|
baconian-project | baconian-project-master/baconian/test/tests/test_common/test_replay_buffer.py | from baconian.envs.gym_env import make
from baconian.core.core import EnvSpec
from baconian.test.tests.set_up.setup import BaseTestCase
from baconian.algo.misc.replay_buffer import UniformRandomReplayBuffer
class TestReplaybuffer(BaseTestCase):
def test_transition_data(self):
env = make('Acrobot-v1')
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
a = UniformRandomReplayBuffer(limit=10000, action_shape=env_spec.action_shape,
observation_shape=env_spec.obs_shape)
st = env.reset()
for i in range(100):
ac = env_spec.action_space.sample()
st_new, re, done, _ = env.step(action=ac)
a.append(obs0=st, obs1=st_new, action=ac, reward=re, terminal1=done)
st = st_new
batch = a.sample(batch_size=10)
self.assertTrue(batch.state_set.shape[0] == 10)
self.assertTrue(batch.action_set.shape[0] == 10)
self.assertTrue(batch.reward_set.shape[0] == 10)
self.assertTrue(batch.done_set.shape[0] == 10)
self.assertTrue(batch.new_state_set.shape[0] == 10)
| 1,183 | 44.538462 | 86 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_common/test_copy_globally.py | from baconian.test.tests.set_up.setup import TestTensorflowSetup
from baconian.core.util import get_global_arg_dict
class TestCopyGlobally(TestTensorflowSetup):
def test_init_arg_decorator(self):
dqn, local = self.create_dqn()
env_spec = local['env_spec']
mlp_q = local['mlp_q']
dqn.init()
a = get_global_arg_dict()
self.assertTrue(dqn in a)
self.assertTrue(env_spec in a)
self.assertTrue(mlp_q in a)
print(a.keys())
self.setUp()
a = get_global_arg_dict()
| 551 | 28.052632 | 64 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_common/test_logger_recorder.py | from baconian.test.tests.set_up.setup import TestWithAll
from baconian.common.logging import Logger, ConsoleLogger, Recorder, record_return_decorator
import numpy as np
from baconian.core.core import Basic, EnvSpec
from baconian.algo.dqn import DQN
from baconian.envs.gym_env import make
from baconian.algo.value_func.mlp_q_value import MLPQValueFunction
from baconian.core.agent import Agent
class Foo(Basic):
def __init__(self, name='foo'):
super().__init__(name=name)
self.loss = 1.0
self.recorder = Recorder(flush_by_split_status=False, default_obj=self)
def get_status(self):
return dict(x=1)
def get_val(self):
return np.random.random()
@record_return_decorator(which_recorder='self')
def get_by_return(self, res, num=2, *args, **kwargs):
return dict(val=res * num, val2=res)
@property
def name(self):
return 'foo'
class TestLogger(TestWithAll):
def test_register(self):
obj = Foo()
a = Recorder(flush_by_split_status=False, default_obj=obj)
a.register_logging_attribute_by_record(obj=obj, attr_name='val', get_method=lambda x: x['obj'].get_val(),
static_flag=False)
a.register_logging_attribute_by_record(obj=obj, attr_name='loss', static_flag=True)
a.record()
print(a._obj_log)
self.assertTrue('val' in a._obj_log[obj])
self.assertTrue('loss' in a._obj_log[obj])
obj.loss = 10.0
a.record()
b = Recorder(flush_by_split_status=False, default_obj=obj)
b.register_logging_attribute_by_record(obj=obj, attr_name='val', get_method=lambda x: x['obj'].get_val(),
static_flag=False)
b.register_logging_attribute_by_record(obj=obj, attr_name='loss', static_flag=True)
b.record()
self.assertTrue('val' in b._obj_log[obj])
self.assertTrue('loss' in b._obj_log[obj])
obj.loss = 10.0
b.record()
self.assertTrue(b._obj_log is not a._obj_log)
self.assertTrue(b._registered_log_attr_by_get_dict is not a._registered_log_attr_by_get_dict)
def test_return_record(self):
obj = Foo(name='foo')
obj.get_by_return(res=10, num=2)
obj.get_by_return(res=1, num=2)
obj.get_by_return(res=2, num=4)
print(obj.recorder._obj_log)
self.assertEqual(len(obj.recorder._obj_log), 1)
self.assertTrue(obj in obj.recorder._obj_log)
self.assertTrue('val' in obj.recorder._obj_log[obj])
self.assertTrue(len(obj.recorder._obj_log[obj]['val']) == 3)
self.assertTrue(obj.recorder._obj_log[obj]['val'][0]['value'] == 20)
self.assertTrue(obj.recorder._obj_log[obj]['val'][1]['value'] == 2)
self.assertTrue(obj.recorder._obj_log[obj]['val'][2]['value'] == 8)
self.assertTrue('val2' in obj.recorder._obj_log[obj])
self.assertTrue(len(obj.recorder._obj_log[obj]['val2']) == 3)
self.assertTrue(obj.recorder._obj_log[obj]['val2'][0]['value'] == 10)
self.assertTrue(obj.recorder._obj_log[obj]['val2'][1]['value'] == 1)
self.assertTrue(obj.recorder._obj_log[obj]['val2'][2]['value'] == 2)
obj = Foo(name='foo2')
obj.get_by_return(res=10, num=2)
obj.get_by_return(res=1, num=2)
obj.get_by_return(res=2, num=4)
print(obj.recorder._obj_log)
self.assertTrue(obj in obj.recorder._obj_log)
self.assertTrue('val' in obj.recorder._obj_log[obj])
self.assertTrue(len(obj.recorder._obj_log[obj]['val']) == 3)
self.assertTrue(obj.recorder._obj_log[obj]['val'][0]['value'] == 20)
self.assertTrue(obj.recorder._obj_log[obj]['val'][1]['value'] == 2)
self.assertTrue(obj.recorder._obj_log[obj]['val'][2]['value'] == 8)
self.assertTrue('val2' in obj.recorder._obj_log[obj])
self.assertTrue(len(obj.recorder._obj_log[obj]['val2']) == 3)
self.assertTrue(obj.recorder._obj_log[obj]['val2'][0]['value'] == 10)
self.assertTrue(obj.recorder._obj_log[obj]['val2'][1]['value'] == 1)
self.assertTrue(obj.recorder._obj_log[obj]['val2'][2]['value'] == 2)
class TesTLoggerWithDQN(TestWithAll):
def test_integration_with_dqn(self):
env = make('Acrobot-v1')
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
mlp_q = MLPQValueFunction(env_spec=env_spec,
name='mlp_q',
name_scope='mlp_q',
mlp_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 16,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
])
dqn = DQN(env_spec=env_spec,
name='dqn_test',
config_or_config_dict=dict(REPLAY_BUFFER_SIZE=1000,
GAMMA=0.99,
BATCH_SIZE=10,
LEARNING_RATE=0.001,
TRAIN_ITERATION=1,
DECAY=0.5),
value_func=mlp_q)
agent = Agent(env=env, env_spec=env_spec,
algo=dqn,
name='agent')
agent.init()
# dqn.init()
st = env.reset()
from baconian.common.sampler.sample_data import TransitionData
a = TransitionData(env_spec)
res = []
agent.sample(env=env,
sample_count=100,
in_which_status='TRAIN',
store_flag=True,
sample_type='transition')
agent.sample(env=env,
sample_count=100,
in_which_status='TRAIN',
store_flag=True,
sample_type='transition')
res.append(dqn.train(batch_data=a, train_iter=10, sess=None, update_target=True)['average_loss'])
res.append(dqn.train(batch_data=None, train_iter=10, sess=None, update_target=True)['average_loss'])
self.assertTrue(dqn in dqn.recorder._obj_log)
self.assertTrue('average_loss' in dqn.recorder._obj_log[dqn])
self.assertTrue(len(dqn.recorder._obj_log[dqn]['average_loss']) == 2)
self.assertTrue(
np.equal(np.array(res), [x['value'] for x in dqn.recorder._obj_log[dqn]['average_loss']]).all())
self.assertTrue(len(Logger()._registered_recorders) > 0)
self.assertTrue(dqn.recorder in Logger()._registered_recorders)
res = dqn.recorder.get_log(attr_name='average_loss', filter_by_status=dict())
self.assertEqual(len(res), 2)
res = agent.recorder.get_log(attr_name='sum_reward', filter_by_status={'status': 'TRAIN'})
self.assertEqual(len(res), 2)
res = agent.recorder.get_log(attr_name='sum_reward', filter_by_status={'status': 'TEST'})
self.assertEqual(len(res), 0)
Logger().flush_recorder()
def test_console_logger(self):
self.assertTrue(ConsoleLogger().inited_flag)
logger = ConsoleLogger()
self.assertTrue(logger.inited_flag)
logger.print('info', 'this is for test %s', 'args')
logger.print('info', 'this is for test {}'.format('args'))
logger2 = ConsoleLogger()
self.assertEqual(id(logger), id(logger2))
logger.flush()
if __name__ == '__main__':
import unittest
unittest.main()
| 8,343 | 43.382979 | 113 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_common/test_space.py | import unittest
from baconian.core.core import EnvSpec
from baconian.common.spaces import *
from baconian.common.special import *
from baconian.envs.gym_env import make
import numpy as np
from baconian.test.tests.set_up.setup import *
class TestSpace(TestWithLogSet):
def test_box(self):
pass
| 307 | 22.692308 | 46 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_common/test_data_pre_processing.py | from baconian.envs.gym_env import make
from baconian.core.core import EnvSpec
from baconian.test.tests.set_up.setup import BaseTestCase
from baconian.common.data_pre_processing import *
import numpy as np
class TestDataPreProcessing(BaseTestCase):
def test_min_max(self):
for env in (make('Pendulum-v0'), make('Acrobot-v1'), make('HalfCheetahBulletEnv-v0')):
for sample_space in (env.observation_space, env.action_space):
sample_fn = sample_space.sample
dims = sample_space.flat_dim
try:
print("test {} with sample {} dims {}".format(env, sample_fn, dims))
# test batch scaler
min_max = BatchMinMaxScaler(dims=dims)
data_list = []
for i in range(100):
data_list.append(sample_fn())
data = min_max.process(np.array(data_list))
self.assertTrue(np.greater_equal(np.ones(dims),
data).all())
self.assertTrue(np.less_equal(np.zeros(dims),
data).all())
# test batch scaler with given range
min_max = BatchMinMaxScaler(dims=dims,
desired_range=(np.ones(dims) * -1.0,
np.ones(dims) * 5.0))
data_list = []
for i in range(100):
data_list.append(sample_fn())
data = min_max.process(np.array(data_list))
self.assertTrue(np.greater_equal(np.ones(dims) * 5.0,
data).all())
self.assertTrue(np.less_equal(np.ones(dims) * -1.0,
data).all())
self.assertEqual(np.max(data), 5.0)
self.assertEqual(np.min(data), -1.0)
data = min_max.inverse_process(data)
self.assertTrue(np.isclose(data, np.array(data_list)).all())
# test batch scaler with given range and given initial data
data_list = []
for i in range(100):
data_list.append(sample_fn())
min_max = RunningMinMaxScaler(dims=dims,
desired_range=(np.ones(dims) * -1.0,
np.ones(dims) * 5.0),
init_data=np.array(data_list))
data = min_max.process(np.array(data_list))
self.assertTrue(np.greater_equal(np.ones(dims) * 5.0,
data).all())
self.assertTrue(np.less_equal(np.ones(dims) * -1.0,
data).all())
self.assertEqual(np.max(data), 5.0)
self.assertEqual(np.min(data), -1.0)
# test batch scaler with given range and given initial min and max
data_list = []
for i in range(100):
data_list.append(sample_fn())
min_max = RunningMinMaxScaler(dims=dims,
desired_range=(np.ones(dims) * -1.0,
np.ones(dims) * 5.0),
init_min=np.min(np.array(data_list), axis=0),
init_max=np.max(np.array(data_list), axis=0))
data = min_max.process(np.array(data_list))
self.assertTrue(np.greater_equal(np.ones(dims) * 5.0,
data).all())
self.assertTrue(np.less_equal(np.ones(dims) * -1.0,
data).all())
self.assertEqual(np.max(data), 5.0)
self.assertEqual(np.min(data), -1.0)
# test update function by a larger range of data
pre_min = np.min(np.array(data_list), axis=0)
pre_max = np.max(np.array(data_list), axis=0)
data_list = np.array(data_list) * 2.0
min_max.update_scaler(data_list)
self.assertTrue(np.equal(pre_min * 2.0, min_max._min).all())
self.assertTrue(np.equal(pre_max * 2.0, min_max._max).all())
except ShapeNotCompatibleError as e:
from baconian.common.spaces import Box
if isinstance(sample_space, Box):
raise ValueError
else:
pass
def test_standard_scaler(self):
for env in (make('Pendulum-v0'), make('Acrobot-v1'), make('HalfCheetahBulletEnv-v0')):
for sample_space in (env.observation_space, env.action_space):
sample_fn = sample_space.sample
dims = sample_space.flat_dim
try:
# test batch standard scaler
standard_scaler = BatchStandardScaler(dims=dims)
data_list = []
for i in range(100):
data_list.append(sample_fn())
data = standard_scaler.process(np.array(data_list))
self.assertTrue(np.isclose(np.mean(data, axis=0), 0.0).all())
# TODO a theoretical bound should be given
# self.assertTrue(np.isclose(np.var(data, axis=0), 1.0, atol=0.04).all())
data = standard_scaler.inverse_process(data)
self.assertTrue(np.isclose(data, np.array(data_list)).all())
# test running standard scaler
standard_scaler = RunningStandardScaler(dims=dims)
data_list = []
for i in range(100):
data_list.append(sample_fn())
standard_scaler.update_scaler(np.array(data_list))
self.assertEqual(standard_scaler._data_count, 100)
data = standard_scaler.process(np.array(data_list))
self.assertTrue(np.isclose(np.mean(data, axis=0), 0.0).all())
# TODO a theoretical bound should be given
# self.assertTrue(np.isclose(np.var(data, axis=0), 1.0, atol=0.04).all())
# test update function
new_data_list = []
for i in range(100):
new_data_list.append(sample_fn())
standard_scaler.update_scaler(np.array(new_data_list))
self.assertEqual(standard_scaler._data_count, 200)
data_list += new_data_list
data = standard_scaler.process(np.array(data_list))
self.assertTrue(np.isclose(np.mean(data, axis=0), 0.0).all())
# TODO a theoretical bound should be given
# self.assertTrue(np.isclose(np.var(data, axis=0), 1.0, atol=0.04).all())
# test running scaler with given data
data_list = []
for i in range(100):
data_list.append(sample_fn())
standard_scaler = RunningStandardScaler(dims=dims,
init_data=np.array(data_list))
self.assertEqual(standard_scaler._data_count, 100)
data = standard_scaler.process(np.array(data_list))
self.assertTrue(np.isclose(np.mean(data, axis=0), 0.0).all())
# TODO a theoretical bound should be given
# self.assertTrue(np.isclose(np.var(data, axis=0), 1.0, atol=0.04).all())
# test update of running scaler with given data
new_data_list = []
for i in range(100):
new_data_list.append(sample_fn())
standard_scaler.update_scaler(np.array(new_data_list))
self.assertEqual(standard_scaler._data_count, 200)
data_list += new_data_list
data = standard_scaler.process(np.array(data_list))
self.assertTrue(np.isclose(np.mean(data, axis=0), 0.0).all())
# TODO a theoretical bound should be given
# self.assertTrue(np.isclose(np.var(data, axis=0), 1.0, atol=0.04).all())
# test running scaler with given initial mean, var.
data_list = []
for i in range(100):
data_list.append(sample_fn())
standard_scaler = RunningStandardScaler(dims=dims,
init_mean=np.mean(data_list, axis=0),
init_var=np.var(data_list, axis=0),
init_mean_var_data_count=100)
self.assertEqual(standard_scaler._data_count, 100)
data = standard_scaler.process(np.array(data_list))
self.assertTrue(np.isclose(np.mean(data, axis=0), 0.0).all())
# TODO a theoretical bound should be given
# self.assertTrue(np.isclose(np.var(data, axis=0), 1.0, atol=0.04).all())
new_data_list = []
for i in range(100):
new_data_list.append(sample_fn())
standard_scaler.update_scaler(np.array(new_data_list))
self.assertEqual(standard_scaler._data_count, 200)
data_list += new_data_list
data = standard_scaler.process(np.array(data_list))
self.assertTrue(np.isclose(np.mean(data, axis=0), 0.0).all())
# TODO a theoretical bound should be given
# self.assertTrue(np.isclose(np.var(data, axis=0), 1.0, atol=0.04).all())
except ShapeNotCompatibleError as e:
from baconian.common.spaces import Box
if isinstance(sample_space, Box):
raise ValueError
else:
pass
| 10,744 | 53.543147 | 97 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_common/test_noise.py | from baconian.test.tests.set_up.setup import BaseTestCase
from baconian.common.noise import *
from baconian.common.schedules import *
t = 0
def get_t():
global t
return t
class TestNoise(BaseTestCase):
def test_all_noise(self):
action_w = LinearScheduler(t_fn=get_t,
schedule_timesteps=100,
final_p=1.0,
initial_p=0.0)
noise_w = LinearScheduler(t_fn=get_t,
final_p=0.0,
schedule_timesteps=100,
initial_p=1.0)
noise_wrapper = AgentActionNoiseWrapper(noise=OUNoise(),
action_weight_scheduler=action_w,
noise_weight_scheduler=noise_w)
for i in range(101):
print('action w {}, noise w {}'.format(noise_wrapper.action_weight_scheduler.value(),
noise_wrapper.noise_weight_scheduler.value()))
print(noise_wrapper(action=1.0))
if i == 100:
self.assertEqual(noise_wrapper(action=1.0), 1.0)
global t
t += 1
| 1,277 | 35.514286 | 97 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_common/test_misc.py | import unittest
from baconian.core.core import EnvSpec
from baconian.common.spaces import *
from baconian.common.special import *
from baconian.envs.gym_env import make
import numpy as np
from baconian.test.tests.set_up.setup import BaseTestCase
def create_env_spec():
env = EnvSpec(obs_space=Box(low=0.0,
high=1.0,
shape=[2]),
action_space=Box(low=0.0,
high=1.0,
shape=[2]))
return env
class TestMisc(BaseTestCase):
def test_env_spec(self):
env = create_env_spec()
self.assertEqual(env.flat_obs_dim, 2)
self.assertEqual(env.flat_action_dim, 2)
def test_misc_func(self):
env = make('Pendulum-v0')
a = make_batch(v=np.array([env.action_space.sample() for _ in range(10)]),
original_shape=env.env_spec.action_shape)
self.assertEqual(a.shape[0], 10)
self.assertTrue(a.shape[1:] == env.env_spec.action_shape)
for ac in a:
self.assertTrue(env.action_space.contains(ac))
a = make_batch(v=np.array([env.observation_space.sample() for _ in range(10)]),
original_shape=env.env_spec.obs_shape)
self.assertTrue(a.shape[1:] == env.env_spec.obs_shape)
for ac in a:
self.assertTrue(env.observation_space.contains(ac))
env = make('Acrobot-v1')
a = make_batch(v=np.array([env.action_space.sample() for _ in range(10)]),
original_shape=env.env_spec.action_shape)
self.assertEqual(a.shape[0], 10)
self.assertTrue(a.shape[1:] == env.env_spec.action_shape)
for ac in a:
self.assertTrue(env.action_space.contains(ac))
a = make_batch(v=np.array([env.observation_space.sample() for _ in range(10)]),
original_shape=env.env_spec.obs_shape)
self.assertEqual(a.shape[0], 10)
self.assertTrue(a.shape[1:] == env.env_spec.obs_shape)
for ac in a:
self.assertTrue(env.observation_space.contains(ac))
| 2,147 | 36.684211 | 87 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_common/test_sample_data.py | from baconian.common.sampler.sample_data import TransitionData, TrajectoryData
from baconian.envs.gym_env import make
from baconian.core.core import EnvSpec
import numpy as np
from baconian.test.tests.set_up.setup import BaseTestCase
from baconian.common.error import *
class TestSampleData(BaseTestCase):
def test_transition_data(self):
env = make('Acrobot-v1')
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
a = TransitionData(env_spec)
st = env.reset()
for i in range(100):
ac = env_spec.action_space.sample()
st_new, re, done, _ = env.step(action=ac)
a.append(state=st, new_state=st_new, action=ac, done=done, reward=re)
self.assertEqual(a.reward_set.shape[0], 100)
self.assertEqual(a.done_set.shape[0], 100)
self.assertEqual(a.action_set.shape[0], 100)
self.assertEqual(a.state_set.shape[0], 100)
self.assertEqual(a.new_state_set.shape[0], 100)
self.assertEqual(a('reward_set').shape[0], 100)
self.assertEqual(a('done_set').shape[0], 100)
self.assertEqual(a('state_set').shape[0], 100)
self.assertEqual(a('new_state_set').shape[0], 100)
self.assertEqual(a('action_set').shape[0], 100)
a = TransitionData(obs_shape=list(np.array(env_spec.obs_space.sample()).shape),
action_shape=list(np.array(env_spec.action_space.sample()).shape))
st = env.reset()
for i in range(100):
ac = env_spec.action_space.sample()
st_new, re, done, _ = env.step(action=ac)
a.append(state=st, new_state=st_new, action=ac, done=done, reward=re)
self.assertEqual(a.reward_set.shape[0], 100)
self.assertEqual(a.done_set.shape[0], 100)
self.assertEqual(a.action_set.shape[0], 100)
self.assertEqual(a.state_set.shape[0], 100)
self.assertEqual(a.new_state_set.shape[0], 100)
self.assertEqual(a('reward_set').shape[0], 100)
self.assertEqual(a('done_set').shape[0], 100)
self.assertEqual(a('state_set').shape[0], 100)
self.assertEqual(a('new_state_set').shape[0], 100)
self.assertEqual(a('action_set').shape[0], 100)
self.assertTrue(np.equal(a.get_mean_of('state_set'), a.apply_op('state_set', np.mean)).all())
self.assertTrue(np.equal(a.get_sum_of('state_set'), a.apply_op('state_set', np.sum)).all())
self.assertTrue(np.equal(a.get_sum_of('reward_set'), a.apply_op('reward_set', np.sum)).all())
self.assertTrue(np.equal(a.get_sum_of('reward_set'), a.apply_op('reward_set', np.sum)).all())
self.assertTrue(np.equal(a.get_sum_of('action_set'), a.apply_op('action_set', np.sum)).all())
self.assertTrue(np.equal(a.get_sum_of('action_set'), a.apply_op('action_set', np.sum)).all())
self.assertTrue(np.equal(a.apply_op('state_set', np.max, axis=-1), np.max(a('state_set'), axis=-1)).all())
tmp_action = a('action_set').copy()
a.apply_transformation(set_name='action_set', func=lambda x: x * 2, direct_apply=False)
self.assertTrue(np.equal(tmp_action, a('action_set')).all())
a.apply_transformation(set_name='action_set', func=lambda x: x * 2, direct_apply=True)
self.assertTrue(np.equal(tmp_action * 2.0, a('action_set')).all())
try:
a.apply_transformation(set_name='action_set', func=lambda _: np.array([1, 2, 3]), direct_apply=True)
except TransformationResultedToDifferentShapeError as e:
pass
else:
raise TypeError
a.apply_transformation(set_name='action_set', func=lambda x: x // 2, direct_apply=True)
self.assertTrue(np.equal(tmp_action, a('action_set')).all())
index = np.arange(len(a._internal_data_dict['state_set'][0])).tolist()
b = a.get_copy()
a.shuffle(index=list(index))
for i in range(len(index)):
for key in a._internal_data_dict.keys():
self.assertTrue(np.equal(np.array(a._internal_data_dict[key][0][i]),
np.array(b._internal_data_dict[key][0][i])).all())
a.append_new_set(name='test', data_set=np.ones_like(a._internal_data_dict['state_set'][0]),
shape=a._internal_data_dict['state_set'][1])
a.reset()
self.assertEqual(a.reward_set.shape[0], 0)
self.assertEqual(a.done_set.shape[0], 0)
self.assertEqual(a.action_set.shape[0], 0)
self.assertEqual(a.state_set.shape[0], 0)
self.assertEqual(a.new_state_set.shape[0], 0)
self.assertEqual(a('reward_set').shape[0], 0)
self.assertEqual(a('done_set').shape[0], 0)
self.assertEqual(a('state_set').shape[0], 0)
self.assertEqual(a('new_state_set').shape[0], 0)
self.assertEqual(a('action_set').shape[0], 0)
def test_trajectory_data(self):
env = make('Acrobot-v1')
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
a = TrajectoryData(env_spec)
tmp_traj = TransitionData(env_spec)
st = env.reset()
re_list = []
st_list = []
for i in range(100):
ac = env_spec.action_space.sample()
st_new, re, done, _ = env.step(action=ac)
st_list.append(st_new)
re_list.append(re)
if (i + 1) % 10 == 0:
done = True
else:
done = False
tmp_traj.append(state=st, new_state=st_new, action=ac, done=done, reward=re)
if done:
a.append(tmp_traj.get_copy())
tmp_traj.reset()
self.assertEqual(a.trajectories.__len__(), 10)
for traj in a.trajectories:
self.assertEqual(len(traj), 10)
| 5,900 | 45.101563 | 114 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_common/__init__.py | 0 | 0 | 0 | py |
|
baconian-project | baconian-project-master/baconian/test/tests/test_common/test_status.py | from baconian.test.tests.set_up.setup import TestWithAll
from baconian.core.status import StatusCollector
class TestStatus(TestWithAll):
def test_status_collector(self):
a = StatusCollector()
algo, local = self.create_dqn()
env = local['env']
env_spec = local['env_spec']
agent, _ = self.create_agent(algo=algo, env=env,
env_spec=env_spec,
eps=self.create_eps(env_spec=env_spec)[0])
self.register_global_status_when_test(env=env, agent=agent)
agent.init()
a.register_info_key_status(obj=agent, info_key='predict_counter', under_status='TRAIN',
return_name='train_counter')
a.register_info_key_status(obj=agent, info_key='predict_counter', under_status='TEST',
return_name='test_counter')
env.reset()
agent.sample(env=env, sample_count=10, store_flag=True, in_which_status='TRAIN')
agent.sample(env=env, sample_count=10, store_flag=True, in_which_status='TEST')
agent.sample(env=env, sample_count=10, store_flag=True, in_which_status='TRAIN')
res = a()
self.assertTrue(len(res) == 2)
self.assertTrue('train_counter' in res)
self.assertTrue('test_counter' in res)
self.assertTrue(res['test_counter'] == 10)
self.assertTrue(res['train_counter'] == 20)
class TestStatusWithDQN(TestWithAll):
def test_with_dqn(self):
dqn, local = self.create_dqn()
env = local['env']
env_spec = local['env_spec']
dqn.init()
st = env.reset()
from baconian.common.sampler.sample_data import TransitionData
a = TransitionData(env_spec)
res = []
for i in range(100):
ac = dqn.predict(obs=st, sess=self.sess, batch_flag=False)
st_new, re, done, _ = env.step(action=ac)
a.append(state=st, new_state=st_new, action=ac, done=done, reward=re)
dqn.append_to_memory(a)
res.append(dqn.train(batch_data=a, train_iter=10, sess=None, update_target=True)['average_loss'])
res.append(dqn.train(batch_data=None, train_iter=10, sess=None, update_target=True)['average_loss'])
print(dqn._status())
print(dqn._status._info_dict_with_sub_info)
| 2,367 | 39.827586 | 108 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_env/test_basic.py | from baconian.envs.gym_env import GymEnv
from baconian.test.tests.set_up.setup import TestWithLogSet
from gym import make
class TestEnv(TestWithLogSet):
def test_gym_env(self):
a = GymEnv('Acrobot-v1')
a.set_status('TRAIN')
self.assertEqual(a.total_step_count_fn(), 0)
self.assertEqual(a._last_reset_point, 0)
a.init()
a.seed(10)
a.reset()
self.assertEqual(a.total_step_count_fn(), 0)
self.assertEqual(a._last_reset_point, 0)
for i in range(1000):
new_st, re, done, _ = a.step(action=a.action_space.sample())
self.assertEqual(a.total_step_count_fn(), i + 1)
if done is True:
a.reset()
self.assertEqual(a._last_reset_point, a.total_step_count_fn())
self.assertEqual(a._last_reset_point, i + 1)
# def test_all_get_state(self):
# type_list = []
# for id in GymEnv._all_gym_env_id:
# try:
# print(id)
# env = make(id)
# type_list.append(type(env).__name__)
# st = env.reset()
# self.assertTrue(env.observation_space.contains(st))
# assert env.observation_space.contains(st)
# del env
# except Exception:
# print("{} is not found".format(id))
# else:
# print("{} is found".format(id))
# print(set(type_list))
| 1,482 | 36.075 | 78 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_env/test_atari.py | from baconian.envs.gym_env import GymEnv
from baconian.test.tests.set_up.setup import TestWithLogSet
from gym import make
class TestEnv(TestWithLogSet):
def test_gym_env(self):
a = GymEnv('AirRaid-v0')
a.set_status('TRAIN')
self.assertEqual(a.total_step_count_fn(), 0)
self.assertEqual(a._last_reset_point, 0)
a.init()
a.seed(10)
a.reset()
self.assertEqual(a.total_step_count_fn(), 0)
self.assertEqual(a._last_reset_point, 0)
for i in range(1000):
new_st, re, done, _ = a.step(action=a.action_space.sample())
self.assertEqual(a.total_step_count_fn(), i + 1)
if done is True:
a.reset()
self.assertEqual(a._last_reset_point, a.total_step_count_fn())
self.assertEqual(a._last_reset_point, i + 1)
| 863 | 35 | 78 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_env/test_pybullet_env.py | from baconian.envs.gym_env import GymEnv
from baconian.test.tests.set_up.setup import TestWithLogSet
from gym import make
class TestEnv(TestWithLogSet):
def test_gym_env(self):
a = GymEnv('HalfCheetahBulletEnv-v0')
a.set_status('TRAIN')
self.assertEqual(a.total_step_count_fn(), 0)
self.assertEqual(a._last_reset_point, 0)
a.init()
a.seed(10)
a.reset()
self.assertEqual(a.total_step_count_fn(), 0)
self.assertEqual(a._last_reset_point, 0)
for i in range(1000):
new_st, re, done, _ = a.step(action=a.action_space.sample())
self.assertEqual(a.total_step_count_fn(), i + 1)
if done is True:
a.reset()
self.assertEqual(a._last_reset_point, a.total_step_count_fn())
self.assertEqual(a._last_reset_point, i + 1)
| 876 | 35.541667 | 78 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_env/test_env_wrapper.py | from baconian.envs.gym_env import GymEnv, make
from baconian.test.tests.set_up.setup import TestWithLogSet
from baconian.envs.env_wrapper import StepObservationWrapper
class TestEnvWrapper(TestWithLogSet):
def test_obs_wrapper(self):
env = make('Pendulum-v0')
env = StepObservationWrapper(env=env)
env.reset()
for i in range(10):
obs, _, _, _ = env.step(action=env.action_space.sample())
self.assertEqual(obs[-1], i + 1)
obs = env.reset()
self.assertEqual(obs[-1], 0)
self.assertTrue(env.observation_space.contains(obs))
self.assertTrue(env.action_space.contains(env.action_space.sample()))
self.assertTrue(env.observation_space.contains(env.reset()))
env.get_state()
| 778 | 37.95 | 77 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_env/__init__.py | 0 | 0 | 0 | py |
|
baconian-project | baconian-project-master/baconian/test/tests/test_env/test_dmcontrol.py | from baconian.envs.dmcontrol_env import have_mujoco_flag
import unittest
class TestEnv(unittest.TestCase):
def test_dmcontrol_env(self):
if have_mujoco_flag:
from baconian.envs.dmcontrol_env import DMControlEnv
a = DMControlEnv('cartpole', 'swingup')
a.set_status('TRAIN')
self.assertEqual(a.total_step_count_fn(), 0)
self.assertEqual(a._last_reset_point, 0)
a.init()
a.seed(10)
a.reset()
self.assertEqual(a.total_step_count_fn(), 0)
self.assertEqual(a._last_reset_point, 0)
for i in range(1000):
new_st, re, done, _ = a.step(action=a.action_space.sample())
self.assertEqual(a.total_step_count_fn(), i + 1)
if done is True:
a.reset()
self.assertEqual(a._last_reset_point, a.total_step_count_fn())
self.assertEqual(a._last_reset_point, i + 1)
| 996 | 38.88 | 82 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_env/test_robotics.py | from baconian.envs.gym_env import GymEnv
from baconian.test.tests.set_up.setup import TestWithLogSet
from gym import make
import os
import platform
from pathlib import Path
_PLATFORM = platform.system()
try:
_PLATFORM_SUFFIX = {
"Linux": "linux",
"Darwin": "macos",
"Windows": "win64"
}[_PLATFORM]
except KeyError:
raise OSError("Unsupported platform: {}".format(_PLATFORM))
have_mujoco_flag = True
try:
import mujoco_py
except Exception:
have_mujoco_flag = False
if have_mujoco_flag:
os.environ['LD_LIBRARY_PATH'] = os.environ.get('LD_LIBRARY_PATH', '') \
+ ':' + str(Path.home()) + '/.mujoco/mujoco200_{}/bin'.format(_PLATFORM_SUFFIX)
os.environ['MUJOCO_PY_MUJOCO_PATH'] = os.environ.get('MUJOCO_PY_MUJOCO_PATH', '') \
+ str(Path.home()) + '/.mujoco/mujoco200_{}'.format(_PLATFORM_SUFFIX)
class TestEnv(TestWithLogSet):
def test_gym_env(self):
if have_mujoco_flag:
a = GymEnv('FetchPickAndPlace-v1')
a.set_status('TRAIN')
self.assertEqual(a.total_step_count_fn(), 0)
self.assertEqual(a._last_reset_point, 0)
a.init()
a.seed(10)
a.reset()
self.assertEqual(a.total_step_count_fn(), 0)
self.assertEqual(a._last_reset_point, 0)
for i in range(1000):
new_st, re, done, _ = a.step(action=a.action_space.sample())
self.assertEqual(a.total_step_count_fn(), i + 1)
if done is True:
a.reset()
self.assertEqual(a._last_reset_point, a.total_step_count_fn())
self.assertEqual(a._last_reset_point, i + 1)
| 1,769 | 33.705882 | 115 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_agent/__init__.py | 0 | 0 | 0 | py |
|
baconian-project | baconian-project-master/baconian/test/tests/test_agent/test_agent.py | from baconian.test.tests.set_up.setup import TestWithAll
from baconian.common.sampler.sample_data import SampleData
class TestAgent(TestWithAll):
def test_agent(self):
algo, local = self.create_dqn()
env = local['env']
env_spec = local['env_spec']
agent, _ = self.create_agent(algo=algo, env=env,
env_spec=env_spec,
eps=self.create_eps(env_spec=env_spec)[0])
self.register_global_status_when_test(agent, env)
agent.init()
env.reset()
data = agent.sample(env=env, sample_count=10, store_flag=False, in_which_status='TEST')
self.assertTrue(isinstance(data, SampleData))
self.assertEqual(agent.algo.replay_buffer.nb_entries, 0)
data = agent.sample(env=env, sample_count=10, store_flag=True, in_which_status='TRAIN')
self.assertTrue(isinstance(data, SampleData))
self.assertEqual(agent.algo.replay_buffer.nb_entries, 10)
def test_test(self):
algo, local = self.create_dqn()
env = local['env']
env_spec = local['env_spec']
agent, _ = self.create_agent(algo=algo, env=env,
env_spec=env_spec,
eps=self.create_eps(env_spec=env_spec)[0])
self.register_global_status_when_test(agent, env)
agent.init()
env.reset()
agent.test(sample_count=2)
| 1,458 | 41.911765 | 95 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_rl/__init__.py | 0 | 0 | 0 | py |
|
baconian-project | baconian-project-master/baconian/test/tests/test_rl/test_algo.py | from baconian.test.tests.set_up.setup import TestTensorflowSetup
import tensorflow as tf
from baconian.algo.dynamics.dynamics_model import DifferentiableDynamics
import numpy as np
class TestBasicClassInAlgo(TestTensorflowSetup):
def test_derivable(self):
val = 10.0
val2 = 2.0
bs = 1
in_node = tf.placeholder(shape=[None, 10], dtype=tf.float32)
in_node2 = tf.placeholder(shape=[None, 10], dtype=tf.float32)
out_node = in_node * in_node * val + in_node2 * in_node2 * val2
a = DifferentiableDynamics(input_node_dict=dict(in_node=in_node, in_node2=in_node2),
output_node_dict=dict(out_node=out_node))
self.sess.run(tf.global_variables_initializer())
res = self.sess.run(a.grad_on_input(key_or_node='in_node'), feed_dict={
in_node: np.random.random([bs, 10]),
in_node2: np.random.random([bs, 10])
})
print('jacobian {}'.format(np.array(res).shape))
# self.assertTrue(np.equal(res, val).all())
res = self.sess.run(a.grad_on_input(key_or_node='in_node2'), feed_dict={
in_node: np.random.random([bs, 10]),
in_node2: np.random.random([bs, 10])
})
print('jacobian {}'.format(np.array(res).shape))
# self.assertTrue(np.equal(res, val2).all())
res = self.sess.run(a.grad_on_input(key_or_node=in_node), feed_dict={
in_node: np.random.random([bs, 10]),
in_node2: np.random.random([bs, 10])
})
# self.assertTrue(np.equal(res, val).all())
res = self.sess.run(a.grad_on_input(key_or_node=in_node2), feed_dict={
in_node: np.random.random([bs, 10]),
in_node2: np.random.random([bs, 10])
})
# self.assertTrue(np.equal(res, val2).all())
res = self.sess.run(a.grad_on_input(key_or_node=in_node, order=2), feed_dict={
in_node: np.random.random([bs, 10]),
in_node2: np.random.random([bs, 10])
})
print('hessian {}'.format(np.array(res).shape))
# self.assertTrue(np.equal(res, 0).all())
res = self.sess.run(a.grad_on_input(key_or_node=in_node2, order=2), feed_dict={
in_node: np.random.random([bs, 10]),
in_node2: np.random.random([bs, 10])
})
print('hessian {}'.format(np.array(res).shape))
# self.assertTrue(np.equal(res, 0).all())
| 2,446 | 38.467742 | 92 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_rl/test_misc/__init__.py | 0 | 0 | 0 | py |