repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 class) |
---|---|---|---|---|---|---|
baconian-project | baconian-project-master/baconian/test/tests/test_rl/test_misc/test_exploration_strategy.py | from baconian.algo.misc import EpsilonGreedy
from baconian.test.tests.set_up.setup import TestWithAll
from baconian.common.schedules import LinearScheduler
x = 0
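# The tests below exercise EpsilonGreedy exploration wrapped around a DQN built by the shared
# test fixtures. `x` is a module-level step counter read by the LinearScheduler's t_fn, so
# test_eps_with_scheduler can assert that random_prob decays linearly from 1.0 towards 0.0
# over the scheduler's 10 timesteps.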
class TestExplorationStrategy(TestWithAll):
def test_eps_greedy(self):
dqn, locals = self.create_dqn()
dqn.init()
env = locals['env']
eps = EpsilonGreedy(action_space=dqn.env_spec.action_space,
init_random_prob=0.5)
st = env.reset()
for i in range(100):
ac = eps.predict(obs=st, sess=self.sess, batch_flag=False, algo=dqn)
st_new, re, done, _ = env.step(action=ac)
self.assertTrue(env.action_space.contains(ac))
def test_eps_with_scheduler(self):
dqn, locals = self.create_dqn()
env = locals['env']
def func():
global x
return x
dqn.init()
eps = EpsilonGreedy(action_space=dqn.env_spec.action_space,
prob_scheduler=LinearScheduler(initial_p=1.0, t_fn=func, schedule_timesteps=10,
final_p=0.0),
init_random_prob=1.0)
st = env.reset()
for i in range(10):
global x
ac = eps.predict(obs=st, sess=self.sess, batch_flag=False, algo=dqn)
st_new, re, done, _ = env.step(action=ac)
self.assertAlmostEqual(eps.parameters('random_prob_func')(), 1.0 - (1.0 - 0.0) / 10 * x)
x += 1
| 1,512 | 35.902439 | 107 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_rl/test_value_func/test_mlp_v_func.py | import unittest
from baconian.envs.gym_env import make
from baconian.core.core import EnvSpec
from baconian.algo.value_func import MLPVValueFunc
from baconian.test.tests.set_up.setup import TestTensorflowSetup
import tensorflow as tf
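# Tests for MLPVValueFunc on Pendulum-v0: build a two-layer MLP state-value function from an
# explicit state placeholder, run a forward pass on a sampled observation, and verify that
# make_copy(reuse=True) shares the underlying tf variables (identical ids) while
# make_copy(reuse=False) under a new name_scope creates independent variables of equal shape.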
class TestMLPVValueFunc(TestTensorflowSetup):
def test_init(self):
env = make('Pendulum-v0')
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
state_input = tf.placeholder(shape=[None, env_spec.flat_obs_dim],
dtype=tf.float32,
name='state_ph')
mlp_v = MLPVValueFunc(env_spec=env_spec,
name_scope='mlp_q',
name='mlp_q',
state_input=state_input,
output_low=None,
output_high=None,
mlp_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 16,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
])
mlp_v.init()
mlp_v.forward(obs=env.observation_space.sample())
def test_copy(self):
env = make('Pendulum-v0')
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
state_input = tf.placeholder(shape=[None, env_spec.flat_obs_dim],
dtype=tf.float32,
name='state_ph')
mlp_v = MLPVValueFunc(env_spec=env_spec,
name_scope='mlp_v',
name='mlp_v',
state_input=state_input,
output_low=None,
output_high=None,
mlp_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 16,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
])
mlp_v.init()
new_mlp = mlp_v.make_copy(name='new_mlp',
name_scope='mlp_v',
reuse=True)
new_mlp.init()
self.assertGreater(len(mlp_v.parameters('tf_var_list')), 0)
self.assertGreater(len(new_mlp.parameters('tf_var_list')), 0)
for var1, var2 in zip(mlp_v.parameters('tf_var_list'), new_mlp.parameters('tf_var_list')):
self.assertEqual(var1.shape, var2.shape)
self.assertEqual(id(var1), id(var2))
not_reuse_mlp = mlp_v.make_copy(name='no-reuse-mlp',
name_scope='mlp_no_reuse',
reuse=False)
not_reuse_mlp.init()
self.assertGreater(len(not_reuse_mlp.parameters('tf_var_list')), 0)
for var1, var2 in zip(mlp_v.parameters('tf_var_list'), not_reuse_mlp.parameters('tf_var_list')):
self.assertEqual(var1.shape, var2.shape)
self.assertNotEqual(id(var1), id(var2))
if __name__ == '__main__':
unittest.main()
| 4,542 | 41.457944 | 104 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_rl/test_value_func/__init__.py | | 0 | 0 | 0 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_rl/test_value_func/test_mlp_q_value.py | import unittest
from baconian.common.special import *
from baconian.test.tests.set_up.setup import TestTensorflowSetup
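# Tests for MLPQValueFunction built via the shared create_mlp_q_func fixture: forward passes
# with actions flattened in several equivalent ways (positional/keyword flatten_n, with and
# without make_batch), plus the same make_copy reuse / no-reuse variable-sharing checks used
# for the other value functions.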
class TestMLPQValueFunction(TestTensorflowSetup):
def test_init(self):
mlp_q, local = self.create_mlp_q_func(name='mlp_q')
env = local['env']
env_spec = local['env_spec']
mlp_q.init()
action = env.action_space.sample()
action = np.array([action])
action = flatten_n(env_spec.action_space, action)
mlp_q.forward(obs=env.observation_space.sample(), action=action)
action = env.action_space.sample()
action = np.array([action])
action = flatten_n(space=env.action_space, obs=action)
mlp_q.forward(obs=env.observation_space.sample(), action=action)
action = env.action_space.sample()
action = np.array([action])
action = flatten_n(space=mlp_q.env_spec.action_space,
obs=make_batch(action, original_shape=mlp_q.env_spec.action_shape))
mlp_q.forward(obs=env.observation_space.sample(), action=action)
def test_copy(self):
mlp_q, local = self.create_mlp_q_func(name='mlp_q')
mlp_q.init()
new_mlp = mlp_q.make_copy(name='new_mlp',
name_scope='mlp_q',
reuse=True)
new_mlp.init()
self.assertGreater(len(mlp_q.parameters('tf_var_list')), 0)
self.assertGreater(len(new_mlp.parameters('tf_var_list')), 0)
for var1, var2 in zip(mlp_q.parameters('tf_var_list'), new_mlp.parameters('tf_var_list')):
self.assertEqual(var1.shape, var2.shape)
self.assertEqual(id(var1), id(var2))
not_reuse_mlp = mlp_q.make_copy(name='no-reuse-mlp',
name_scope='mlp_no_reuse',
reuse=False)
not_reuse_mlp.init()
self.assertGreater(len(not_reuse_mlp.parameters('tf_var_list')), 0)
for var1, var2 in zip(mlp_q.parameters('tf_var_list'), not_reuse_mlp.parameters('tf_var_list')):
self.assertEqual(var1.shape, var2.shape)
self.assertNotEqual(id(var1), id(var2))
if __name__ == '__main__':
unittest.main()
| 2,251 | 35.322581 | 104 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_rl/test_model_based/test_mpc.py | from baconian.common.sampler.sample_data import TransitionData
import unittest
from baconian.test.tests.set_up.setup import TestTensorflowSetup
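# Tests for ModelPredictiveControl: init and predict/train loops on the fixture's default
# discrete env (Acrobot-v1) and on continuous Pendulum-v0, then a polymorphism check that
# repeats the same loop with deterministic-MLP, normal-distribution-MLP, uniform-random and
# constant-action policies via the wrap_policy helper below, which takes the test instance
# explicitly.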
class TestMPC(TestTensorflowSetup):
def test_init_discrete(self):
algo, locals = self.create_mpc()
env_spec = locals['env_spec']
env = locals['env']
algo.init()
for _ in range(100):
assert env_spec.action_space.contains(algo.predict(env_spec.obs_space.sample()))
st = env.reset()
data = TransitionData(env_spec)
for _ in range(10):
ac = algo.predict(st)
new_st, re, done, _ = env.step(action=ac)
data.append(state=st,
new_state=new_st,
reward=re,
action=ac,
done=done)
print(algo.train(batch_data=data))
def test_init_continuous(self):
algo, locals = self.create_mpc(env_id='Pendulum-v0')
env_spec = locals['env_spec']
env = locals['env']
algo.init()
for _ in range(100):
assert env_spec.action_space.contains(algo.predict(env_spec.obs_space.sample()))
st = env.reset()
data = TransitionData(env_spec)
for _ in range(10):
ac = algo.predict(st)
new_st, re, done, _ = env.step(action=ac)
data.append(state=st,
new_state=new_st,
reward=re,
action=ac,
done=done)
print(algo.train(batch_data=data))
def test_mpc_polymorphism(self):
policy_func = (
self.create_mlp_deterministic_policy, self.create_normal_dist_mlp_policy, self.create_uniform_policy,
self.create_constant_action_policy)
for i, func in enumerate(policy_func):
self.setUp()
wrap_policy(self, func=func)()
if i < len(policy_func) - 1:
self.tearDown()
def wrap_policy(self, func):
def wrap_func():
mlp_dyna, local = self.create_continue_dynamics_model(env_id='Pendulum-v0')
env_spec = local['env_spec']
env = local['env']
policy = func(env_spec=env_spec)[0]
algo, locals = self.create_mpc(env_spec=env_spec, mlp_dyna=mlp_dyna, policy=policy, env=env)
algo.init()
for _ in range(100):
assert env_spec.action_space.contains(algo.predict(env_spec.obs_space.sample()))
st = env.reset()
data = TransitionData(env_spec)
for _ in range(10):
ac = algo.predict(st)
new_st, re, done, _ = env.step(action=ac)
data.append(state=st,
new_state=new_st,
reward=re,
action=ac,
done=done)
print(algo.train(batch_data=data))
return wrap_func
if __name__ == '__main__':
unittest.main()
| 2,963 | 31.933333 | 113 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_rl/test_model_based/__init__.py | | 0 | 0 | 0 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_rl/test_model_based/test_dyna.py | from baconian.common.sampler.sample_data import TransitionData
from baconian.test.tests.set_up.setup import TestWithAll
import numpy as np
from baconian.algo.dynamics.terminal_func.terminal_func import *
from baconian.algo.dynamics.reward_func.reward_func import RandomRewardFunc
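# Tests for the Dyna algorithm (DDPG as the model-free learner plus a continuous MLP global
# dynamics model): train under the combined, dynamics-only and agent-only states and check
# the dynamics test error stays below a loose bound, then wrap the dynamics model as an
# environment with a fixed episode length of 10 and a random reward function and verify its
# reset/step bookkeeping.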
class TestDynamics(TestWithAll):
def test_init(self):
ddpg, locals = self.create_ddpg()
env_spec = locals['env_spec']
env = locals['env']
mlp_dyna = self.create_continuous_mlp_global_dynamics_model(env_spec=env_spec)[0]
algo = self.create_dyna(env_spec=env_spec, model_free_algo=ddpg, dyanmics_model=mlp_dyna)[0]
algo.init()
st = env.reset()
data = TransitionData(env_spec)
for _ in range(100):
ac = algo.predict(st)
new_st, re, done, _ = env.step(action=ac)
data.append(state=st,
new_state=new_st,
reward=re,
action=ac,
done=done)
algo.append_to_memory(samples=data)
pre_res = 10000
for i in range(20):
print(algo.train(batch_data=data))
print(algo.train(batch_data=data, state='state_dynamics_training'))
print(algo.train(batch_data=data, state='state_agent_training'))
res = algo.test_dynamics(env=env, sample_count=100)
self.assertLess(list(res.values())[0], pre_res)
self.assertLess(list(res.values())[1], pre_res)
print(res)
algo.test()
def test_dynamics_as_env(self):
env = self.create_env('Pendulum-v0')
env_spec = self.create_env_spec(env)
mlp_dyna = self.create_continuous_mlp_global_dynamics_model(env_spec=env_spec)[0]
env = mlp_dyna.return_as_env()
env.init()
env.set_terminal_reward_func(terminal_func=FixedEpisodeLengthTerminalFunc(max_step_length=10,
step_count_fn=lambda: env.total_step_count_fn() - env._last_reset_point),
reward_func=RandomRewardFunc())
env.reset()
self.assertEqual(env._last_reset_point, 0)
for i in range(11):
new_st, re, done, _ = env.step(action=env_spec.action_space.sample())
self.assertEqual(env.total_step_count_fn(), i + 1)
if done is True:
self.assertEqual(i, 9)
env.reset()
self.assertEqual(env._last_reset_point, env.total_step_count_fn())
self.assertEqual(env._last_reset_point, i + 1)
| 2,641 | 42.311475 | 155 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_rl/test_model_free/test_ddpg.py | from baconian.common.sampler.sample_data import TransitionData
from baconian.test.tests.set_up.setup import TestWithAll
from baconian.config.global_config import GlobalConfig
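# DDPG round-trip test: after construction the target networks must hold variables distinct
# from the online networks while the critic-with-actor outputs reuse the critic/target-critic
# variables; the test then fills the replay buffer from Pendulum-v0, copies into a second
# DDPG instance, saves a checkpoint, trains until the two instances diverge, and reloads the
# checkpoint to restore equality of the actor variables.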
class TestDDPG(TestWithAll):
def test_init(self):
ddpg, locals = self.create_ddpg()
self.assert_var_list_id_no_equal(var_list1=ddpg.actor.parameters('tf_var_list'),
var_list2=ddpg.target_actor.parameters('tf_var_list'))
self.assert_var_list_id_no_equal(var_list1=ddpg.critic.parameters('tf_var_list'),
var_list2=ddpg.target_critic.parameters('tf_var_list'))
self.assert_var_list_id_equal(var_list1=ddpg.critic.parameters('tf_var_list'),
var_list2=ddpg._critic_with_actor_output.parameters('tf_var_list'))
self.assert_var_list_id_equal(var_list1=ddpg.target_critic.parameters('tf_var_list'),
var_list2=ddpg._target_critic_with_target_actor_output.parameters('tf_var_list'))
env = locals['env']
env_spec = locals['env_spec']
ddpg.init()
data = TransitionData(env_spec)
st = env.reset()
for i in range(100):
ac = ddpg.predict(st)
new_st, re, done, _ = env.step(ac)
data.append(state=st, new_state=new_st, action=ac, reward=re, done=done)
st = new_st
ddpg.append_to_memory(data)
new_ddpg, _ = self.create_ddpg(name='new_ddpg')
new_ddpg.copy_from(ddpg)
self.assert_var_list_equal(ddpg.actor.parameters('tf_var_list'),
new_ddpg.actor.parameters('tf_var_list'))
self.assert_var_list_equal(ddpg.critic.parameters('tf_var_list'),
new_ddpg.critic.parameters('tf_var_list'))
self.assert_var_list_equal(ddpg.target_actor.parameters('tf_var_list'),
new_ddpg.target_actor.parameters('tf_var_list'))
self.assert_var_list_equal(ddpg.target_critic.parameters('tf_var_list'),
new_ddpg.target_critic.parameters('tf_var_list'))
ddpg.save(save_path=GlobalConfig().DEFAULT_LOG_PATH + '/ddpg_test',
global_step=0,
name=ddpg.name)
for i in range(100):
print(ddpg.train(train_iter=10))
self.assert_var_list_at_least_not_equal(ddpg.critic.parameters('tf_var_list'),
new_ddpg.critic.parameters('tf_var_list'))
self.assert_var_list_at_least_not_equal(ddpg.target_critic.parameters('tf_var_list'),
new_ddpg.target_critic.parameters('tf_var_list'))
self.assert_var_list_at_least_not_equal(ddpg.actor.parameters('tf_var_list'),
new_ddpg.actor.parameters('tf_var_list'))
self.assert_var_list_at_least_not_equal(ddpg.target_actor.parameters('tf_var_list'),
new_ddpg.target_actor.parameters('tf_var_list'))
ddpg.load(path_to_model=GlobalConfig().DEFAULT_LOG_PATH + '/ddpg_test',
model_name=ddpg.name,
global_step=0)
self.assert_var_list_equal(ddpg.actor.parameters('tf_var_list'),
new_ddpg.actor.parameters('tf_var_list'))
| 3,441 | 54.516129 | 119 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_rl/test_model_free/test_dqn.py | from baconian.test.tests.set_up.setup import TestWithAll
from baconian.config.global_config import GlobalConfig
from baconian.algo.dqn import DQN
from baconian.core.core import EnvSpec
from baconian.envs.gym_env import make
from baconian.algo.value_func.mlp_q_value import MLPQValueFunction
from baconian.common.sampler.sample_data import TransitionData
import numpy as np
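# DQN tests: test_init runs the fill-memory / copy_from / save / train / diverge / load /
# re-equal cycle and then repeatedly applies the target-network update op, printing the mean
# absolute difference between online and target Q variables. test_l1_l2_norm builds a Q
# network with very large L1/L2 regularisation (1000.0) and asserts that after training its
# weights are smaller in magnitude, on average, than those of the lightly regularised
# fixture DQN trained on the same data.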
class TestDQN(TestWithAll):
def test_init(self):
dqn, locals = self.create_dqn()
env = locals['env']
env_spec = locals['env_spec']
dqn.init()
st = env.reset()
a = TransitionData(env_spec)
for i in range(100):
ac = dqn.predict(obs=st, sess=self.sess, batch_flag=False)
st_new, re, done, _ = env.step(action=ac)
a.append(state=st, new_state=st_new, action=ac, done=done, reward=re)
st = st_new
dqn.append_to_memory(a)
new_dqn, _ = self.create_dqn(name='new_dqn')
new_dqn.copy_from(dqn)
self.assert_var_list_id_no_equal(dqn.q_value_func.parameters('tf_var_list'),
new_dqn.q_value_func.parameters('tf_var_list'))
self.assert_var_list_id_no_equal(dqn.target_q_value_func.parameters('tf_var_list'),
new_dqn.target_q_value_func.parameters('tf_var_list'))
self.assert_var_list_equal(dqn.q_value_func.parameters('tf_var_list'),
new_dqn.q_value_func.parameters('tf_var_list'))
self.assert_var_list_equal(dqn.target_q_value_func.parameters('tf_var_list'),
new_dqn.target_q_value_func.parameters('tf_var_list'))
dqn.save(save_path=GlobalConfig().DEFAULT_LOG_PATH + '/dqn_test',
global_step=0,
name=dqn.name)
for i in range(10):
print(dqn.train(batch_data=a, train_iter=10, sess=None, update_target=True))
print(dqn.train(batch_data=None, train_iter=10, sess=None, update_target=True))
self.assert_var_list_at_least_not_equal(dqn.q_value_func.parameters('tf_var_list'),
new_dqn.q_value_func.parameters('tf_var_list'))
self.assert_var_list_at_least_not_equal(dqn.target_q_value_func.parameters('tf_var_list'),
new_dqn.target_q_value_func.parameters('tf_var_list'))
dqn.load(path_to_model=GlobalConfig().DEFAULT_LOG_PATH + '/dqn_test',
model_name=dqn.name,
global_step=0)
self.assert_var_list_equal(dqn.q_value_func.parameters('tf_var_list'),
new_dqn.q_value_func.parameters('tf_var_list'))
self.assert_var_list_equal(dqn.target_q_value_func.parameters('tf_var_list'),
new_dqn.target_q_value_func.parameters('tf_var_list'))
for i in range(10):
self.sess.run(dqn.update_target_q_value_func_op,
feed_dict=dqn.parameters.return_tf_parameter_feed_dict())
var1 = self.sess.run(dqn.q_value_func.parameters('tf_var_list'))
var2 = self.sess.run(dqn.target_q_value_func.parameters('tf_var_list'))
import numpy as np
total_diff = 0.0
for v1, v2 in zip(var1, var2):
total_diff += np.mean(np.abs(np.array(v1) - np.array(v2)))
print('update target, difference mean', total_diff)
def test_l1_l2_norm(self):
env = make('Acrobot-v1')
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
name = 'dqn'
mlp_q = MLPQValueFunction(env_spec=env_spec,
name_scope=name + '_mlp',
name=name + '_mlp',
mlp_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 16,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03,
"L1_NORM": 1000.0,
"L2_NORM": 1000.0
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"L1_NORM": 1000.0,
"L2_NORM": 1000.0,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
])
dqn = DQN(env_spec=env_spec,
config_or_config_dict=dict(REPLAY_BUFFER_SIZE=1000,
GAMMA=0.99,
BATCH_SIZE=10,
LEARNING_RATE=0.01,
TRAIN_ITERATION=1,
DECAY=0.5),
name=name,
value_func=mlp_q)
dqn2, _ = self.create_dqn(name='dqn_2')
a = TransitionData(env_spec)
st = env.reset()
dqn.init()
dqn2.init()
for i in range(100):
ac = dqn.predict(obs=st, sess=self.sess, batch_flag=False)
st_new, re, done, _ = env.step(action=ac)
a.append(state=st, new_state=st_new, action=ac, done=done, reward=re)
st = st_new
dqn.append_to_memory(a)
for i in range(20):
print('dqn1 loss: ', dqn.train(batch_data=a, train_iter=10, sess=None, update_target=True))
print('dqn2 loss: ', dqn2.train(batch_data=a, train_iter=10, sess=None, update_target=True))
var_list = self.sess.run(dqn.q_value_func.parameters('tf_var_list'))
print(var_list)
var_list2 = self.sess.run(dqn2.q_value_func.parameters('tf_var_list'))
print(var_list2)
for var, var2 in zip(var_list, var_list2):
diff = np.abs(var2) - np.abs(var)
self.assertTrue(np.greater(np.mean(diff), 0.0).all())
| 6,608 | 49.068182 | 104 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_rl/test_model_free/__init__.py | | 0 | 0 | 0 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_rl/test_model_free/test_ppo.py | from baconian.common.sampler.sample_data import TransitionData, TrajectoryData
from baconian.test.tests.set_up.setup import TestWithAll
from baconian.config.global_config import GlobalConfig
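# PPO round-trip test on Pendulum-v0: copy into a second PPO instance, collect a 100-step
# transition set split into short trajectories, train both from the internal memory and from
# an explicitly passed TrajectoryData, then save/load and assert that the value-function and
# policy variables diverge after training and match again once the checkpoint is restored.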
class TestPPO(TestWithAll):
def test_init(self):
ppo, locals = self.create_ppo()
env = locals['env']
env_spec = locals['env_spec']
ppo.init()
new_ppo, _ = self.create_ppo(name='new_ppo')
new_ppo.copy_from(ppo)
self.assert_var_list_id_no_equal(ppo.value_func.parameters('tf_var_list'),
new_ppo.value_func.parameters('tf_var_list'))
self.assert_var_list_id_no_equal(ppo.policy.parameters('tf_var_list'),
new_ppo.policy.parameters('tf_var_list'))
self.assert_var_list_equal(ppo.value_func.parameters('tf_var_list'),
new_ppo.value_func.parameters('tf_var_list'))
self.assert_var_list_equal(ppo.policy.parameters('tf_var_list'),
new_ppo.policy.parameters('tf_var_list'))
data = TransitionData(env_spec)
st = env.reset()
for i in range(100):
ac = ppo.predict(st)
assert ac.shape[0] == 1
self.assertTrue(env_spec.action_space.contains(ac[0]))
new_st, re, done, _ = env.step(ac)
if i % 9 == 0 and i > 0:
done = True
else:
done = False
data.append(state=st, new_state=new_st, action=ac, reward=re, done=done)
traj = TrajectoryData(env_spec=env_spec)
traj.append(data)
ppo.append_to_memory(traj)
ppo.save(save_path=GlobalConfig().DEFAULT_LOG_PATH + '/ppo_test',
global_step=0,
name=ppo.name)
for i in range(5):
ppo.append_to_memory(traj)
res = ppo.train()
print('value_func_loss {}, policy_average_loss: {}'.format(res['value_func_loss'],
res['policy_average_loss']))
traj_data = TrajectoryData(env_spec=env_spec)
traj_data.append(data)
res = ppo.train(trajectory_data=traj_data,
train_iter=5,
sess=self.sess)
print('value_func_loss {}, policy_average_loss: {}'.format(res['value_func_loss'],
res['policy_average_loss']))
self.assert_var_list_at_least_not_equal(ppo.value_func.parameters('tf_var_list'),
new_ppo.value_func.parameters('tf_var_list'))
self.assert_var_list_at_least_not_equal(ppo.policy.parameters('tf_var_list'),
new_ppo.policy.parameters('tf_var_list'))
ppo.load(path_to_model=GlobalConfig().DEFAULT_LOG_PATH + '/ppo_test',
model_name=ppo.name,
global_step=0)
self.assert_var_list_equal(ppo.value_func.parameters('tf_var_list'),
new_ppo.value_func.parameters('tf_var_list'))
self.assert_var_list_equal(ppo.policy.parameters('tf_var_list'),
new_ppo.policy.parameters('tf_var_list'))
| 3,363 | 45.082192 | 99 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_rl/test_policy/test_mlp_policy.py | from baconian.core.core import EnvSpec
from baconian.test.tests.set_up.setup import TestTensorflowSetup
from baconian.envs.gym_env import make
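# DeterministicMLPPolicy test: forward passes on sampled Pendulum-v0 observations must yield
# actions inside the action space; make_copy(reuse=False) creates fresh variables of equal
# shape, and make_copy(reuse=True) under the original name_scope shares variable ids.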
class TestDeterministicMLPPolicy(TestTensorflowSetup):
def test_mlp_deterministic_policy(self):
env = make('Pendulum-v0')
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
policy, locals = self.create_mlp_deterministic_policy(name='mlp_policy', env_spec=env_spec)
policy.init()
for _ in range(10):
ac = policy.forward(obs=env.observation_space.sample())
self.assertTrue(env.action_space.contains(ac[0]))
p2 = policy.make_copy(name='test',
name_scope='test',
reuse=False)
p2.init()
self.assertGreater(len(policy.parameters('tf_var_list')), 0)
self.assertGreater(len(p2.parameters('tf_var_list')), 0)
for var1, var2 in zip(policy.parameters('tf_var_list'), p2.parameters('tf_var_list')):
self.assertEqual(var1.shape, var2.shape)
self.assertNotEqual(id(var1), id(var2))
p3 = policy.make_copy(name='mlp_policy_2',
name_scope='mlp_policy',
reuse=True)
p3.init()
self.assertGreater(len(p3.parameters('tf_var_list')), 0)
for var1, var2 in zip(policy.parameters('tf_var_list'), p3.parameters('tf_var_list')):
self.assertEqual(var1.shape, var2.shape)
self.assertEqual(id(var1), id(var2)) | 1,593 | 45.882353 | 99 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_rl/test_policy/__init__.py | | 0 | 0 | 0 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_rl/test_policy/test_mlp_norm_policy.py | import unittest
from baconian.envs.gym_env import make
from baconian.core.core import EnvSpec
from baconian.algo.policy.normal_distribution_mlp import NormalDistributionMLPPolicy
from baconian.common.special import *
from baconian.test.tests.set_up.setup import TestTensorflowSetup
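# NormalDistributionMLPPolicy tests: check that get_dist_info shapes match the mean and
# log-variance outputs, that make_copy follows the usual reuse / no-reuse variable-sharing
# rules and copy_from really syncs parameter values, and that compute_dist_info for entropy,
# prob and kl agrees with evaluating policy.kl(other=...) directly in the session.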
class TestNormalDistMLPPolicy(TestTensorflowSetup):
def test_mlp_norm_dist_policy(self):
env = make('Pendulum-v0')
env.reset()
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
policy = NormalDistributionMLPPolicy(env_spec=env_spec,
name='mlp_policy',
name_scope='mlp_policy',
mlp_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 16,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": env_spec.flat_action_dim,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
],
output_high=None,
output_low=None,
output_norm=None,
input_norm=None,
reuse=False)
policy.init()
dist_info = policy.get_dist_info()
self.assertTrue(np.equal(dist_info[0]['shape'], policy.mean_output.shape.as_list()).all())
self.assertTrue(np.equal(dist_info[1]['shape'], policy.logvar_output.shape.as_list()).all())
for _ in range(10):
ac = policy.forward(obs=env.observation_space.sample())
self.assertTrue(env.action_space.contains(ac[0]))
p2 = policy.make_copy(name='test',
name_scope='mlp_policy_2',
reuse=False)
p2.init()
self.assertGreater(len(policy.parameters('tf_var_list')), 0)
self.assertGreater(len(p2.parameters('tf_var_list')), 0)
for var1, var2 in zip(policy.parameters('tf_var_list'), p2.parameters('tf_var_list')):
self.assertEqual(var1.shape, var2.shape)
self.assertNotEqual(id(var1), id(var2))
p3 = policy.make_copy(name='mlp_policy_ttt',
name_scope='mlp_policy',
reuse=True)
p3.init()
self.assertGreater(len(p3.parameters('tf_var_list')), 0)
for var1, var2 in zip(policy.parameters('tf_var_list'), p3.parameters('tf_var_list')):
self.assertEqual(var1.shape, var2.shape)
self.assertEqual(id(var1), id(var2))
# policy.copy_from(p2)
res_not_true = []
for var1, var2, var3 in zip(policy.parameters('tf_var_list'), p2.parameters('tf_var_list'),
p3.parameters('tf_var_list')):
re1, re2, re3 = self.sess.run([var1, var2, var3])
res_not_true.append(np.isclose(re1, re2).all())
res_not_true.append(np.isclose(re3, re2).all())
self.assertTrue(np.isclose(re1, re3).all())
self.assertFalse(np.array(res_not_true).all())
policy.copy_from(p2)
for var1, var2, var3 in zip(policy.parameters('tf_var_list'), p2.parameters('tf_var_list'),
p3.parameters('tf_var_list')):
re1, re2, re3 = self.sess.run([var1, var2, var3])
self.assertTrue(np.isclose(re1, re3).all())
self.assertTrue(np.isclose(re2, re3).all())
self.assertTrue(np.isclose(re1, re2).all())
def test_func(self):
env = make('Pendulum-v0')
env.reset()
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
policy = NormalDistributionMLPPolicy(env_spec=env_spec,
name='mlp_policy',
name_scope='mlp_policy',
mlp_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 16,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": env_spec.flat_action_dim,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
],
output_high=None,
output_low=None,
output_norm=None,
input_norm=None,
reuse=False)
policy.init()
print(
policy.compute_dist_info(name='entropy',
feed_dict={
policy.state_input: make_batch(env_spec.obs_space.sample(),
original_shape=env_spec.obs_shape)}))
print(
policy.compute_dist_info(name='prob',
value=env_spec.action_space.sample(),
feed_dict={
policy.state_input: make_batch(env_spec.obs_space.sample(),
original_shape=env_spec.obs_shape),
policy.action_input: make_batch(env_spec.action_space.sample(),
original_shape=env_spec.action_shape)}))
new_policy = policy.make_copy(
reuse=False,
name='new_p',
name_scope='mlp_policy_2',
)
new_policy.init()
for var1, var2 in zip(policy.parameters('tf_var_list'), new_policy.parameters('tf_var_list')):
print(var1.name)
print(var2.name)
self.assertNotEqual(var1.name, var2.name)
self.assertNotEqual(id(var1), id(var2))
obs1 = make_batch(env_spec.obs_space.sample(),
original_shape=env_spec.obs_shape,
)
obs2 = make_batch(env_spec.obs_space.sample(),
original_shape=env_spec.obs_shape)
kl1 = policy.compute_dist_info(name='kl', other=new_policy, feed_dict={
policy.state_input: obs1,
new_policy.state_input: obs2
})
kl2 = self.sess.run(policy.kl(other=new_policy), feed_dict={
policy.state_input: obs1,
new_policy.state_input: obs2
})
self.assertTrue(np.isclose(kl1, kl2).all())
if __name__ == '__main__':
unittest.main()
| 8,519 | 51.269939 | 113 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_optimal_control/test_lqr_policy.py | from baconian.test.tests.set_up.setup import TestWithAll
from baconian.algo.dynamics.reward_func.reward_func import QuadraticCostFunc
from baconian.envs.gym_env import make
import numpy as np
from baconian.core.core import EnvSpec
from baconian.algo.dynamics.linear_dynamics_model import LinearDynamicsModel
from baconian.algo.policy.lqr_policy import LQRPolicy
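# LQR policy sanity check: a LinearDynamicsModel with a tiny constant transition matrix F and
# a QuadraticCostFunc built from a small constant matrix C and a linear term c set to -1000 on
# the action dimensions are rolled out from a zero initial state for 10 steps, printing the
# cost and the resulting state/action pairs.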
class TestLQRPolicy(TestWithAll):
default_id = -1
def test_correctness(self):
env_id = 'Pendulum-v0'
env = make(env_id)
n = env.observation_space.flat_dim
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
F = np.ones([env.observation_space.flat_dim,
env.observation_space.flat_dim + env.action_space.flat_dim]) * 0.00001
# F[n:, n:] = 0.0001
dyna = LinearDynamicsModel(env_spec=env_spec,
state_transition_matrix=F,
bias=np.zeros([env.observation_space.flat_dim]))
C = np.ones([env.observation_space.flat_dim + env.action_space.flat_dim,
env.observation_space.flat_dim + env.action_space.flat_dim]) * 0.00001
c = np.ones([env.observation_space.flat_dim + env.action_space.flat_dim])
c[n:] = -1000
# C[:n, :] = 0.
# C[:, :n] = 0.
# c[:n] = 0.0
cost_fn = QuadraticCostFunc(C=C, c=c)
policy = LQRPolicy(env_spec=env_spec,
T=5,
dynamics=dyna,
cost_fn=cost_fn)
st = env.reset() * 0.0
for i in range(10):
ac = policy.forward(st)
st = dyna.step(action=ac, state=st, allow_clip=True)
print(cost_fn(state=st, action=ac, new_state=None))
print(st, ac)
| 1,852 | 41.113636 | 91 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_optimal_control/test_ilqr_policy.py | from baconian.test.tests.set_up.setup import TestWithAll
from baconian.algo.dynamics.reward_func.reward_func import CostFunc
from baconian.envs.gym_env import make
import numpy as np
from baconian.core.core import EnvSpec
from baconian.algo.dynamics.dynamics_model import GlobalDynamicsModel
from baconian.algo.policy.ilqr_policy import iLQRPolicy
from baconian.algo.dynamics.dynamics_model import DynamicsEnvWrapper
from baconian.algo.dynamics.terminal_func.terminal_func import RandomTerminalFunc
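# iLQR policy smoke test: a trivial DebugDynamics (next state = state + 0.0001 * action)
# wrapped as a dynamics environment with a random terminal function, and a debugging cost
# sum(a + a^2) whose analytic minimiser is a = -0.5 with cost -0.25, as noted in the print
# statements; the loop runs the policy for 10 steps against the real Pendulum-v0 env and
# prints state, action and cost.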
class DebugDynamics(GlobalDynamicsModel):
flag = 0.5
st = None
def _state_transit(self, state, action, **kwargs) -> np.ndarray:
return state + 0.0001 * action
# self.flag *= -1.0
# return np.ones_like(self.env_spec.obs_space.sample()) * self.flag
# return self.env_spec.obs_space.sample()
class DebuggingCostFunc(CostFunc):
def __call__(self, state=None, action=None, new_state=None, **kwargs) -> float:
# return float(np.sum(action * action) + np.sum(state * state))
return float(np.sum(action + action * action))
class TestiLQRPolicy(TestWithAll):
def test_correctness(self):
env_id = 'Pendulum-v0'
env = make(env_id)
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
dyna = DebugDynamics(env_spec=env_spec)
dyna = DynamicsEnvWrapper(dynamics=dyna)
dyna.set_terminal_reward_func(terminal_func=RandomTerminalFunc(),
reward_func=DebuggingCostFunc())
policy = iLQRPolicy(env_spec=env_spec,
T=10,
delta=0.05,
iteration=2,
dynamics=dyna,
dynamics_model_train_iter=10,
cost_fn=DebuggingCostFunc())
st = env.reset()
dyna.st = np.zeros_like(st)
for i in range(10):
ac = policy.forward(st)
st, _, _, _ = env.step(action=ac)
# st = dyna.step(action=ac, state=st)
print("analytical optimal action -0.5, cost -0.25")
print('state: {}, action: {}, cost {}'.format(st, ac, policy.iLqr_instance.cost_fn(state=st, action=ac,
new_state=None)))
| 2,398 | 41.839286 | 115 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_optimal_control/__init__.py | | 0 | 0 | 0 | py |
baconian-project | baconian-project-master/baconian/test/tests/set_up/setup.py | import os
import shutil
import numpy as np
import tensorflow as tf
from baconian.common.logging import Logger, ConsoleLogger
from baconian.config.global_config import GlobalConfig
from baconian.core.global_var import reset_all, get_all
from baconian.tf.util import create_new_tf_session
from baconian.test.tests.set_up.class_creator import ClassCreatorSetup
from baconian.core.status import reset_global_status_collect
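# Shared unittest base classes: BaseTestCase resets the global registries between tests;
# TestTensorflowSetup additionally tears down any default session, pins CUDA_VISIBLE_DEVICES
# and opens a fresh tf session, and provides helper assertions for comparing variable lists;
# TestWithLogSet wipes the default log directory and (re)initialises Logger/ConsoleLogger;
# TestWithAll and SimpleTestSetup combine or simplify these setups for the other test modules.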
class BaseTestCase(ClassCreatorSetup):
def setUp(self):
reset_all()
reset_global_status_collect()
for key, val in get_all().items():
self.assertTrue(len(val) == 0)
def tearDown(self):
reset_all()
class TestTensorflowSetup(BaseTestCase):
default_id = 0
def setUp(self):
BaseTestCase.setUp(self)
if tf.get_default_session():
sess = tf.get_default_session()
sess.__exit__(None, None, None)
tf.reset_default_graph()
print('set tf device as {}'.format(self.default_id))
os.environ["CUDA_VISIBLE_DEVICES"] = str(self.default_id)
self.sess = create_new_tf_session()
def tearDown(self):
if self.sess.run(tf.report_uninitialized_variables()).shape[0] != 0:
print('some variables are not initialized:')
print(self.sess.run(tf.report_uninitialized_variables()))
print(self.sess.run(tf.report_uninitialized_variables()).shape)
raise AssertionError('some variables are not initialized')
if tf.get_default_session():
sess = tf.get_default_session()
sess.close()
BaseTestCase.tearDown(self)
def assert_var_list_equal(self, var_list1, var_list2):
for var1, var2 in zip(var_list1, var_list2):
res1, res2 = self.sess.run([var1, var2])
self.assertTrue(np.equal(res1, res2).all())
def assert_var_list_at_least_not_equal(self, var_list1, var_list2):
res = []
for var1, var2 in zip(var_list1, var_list2):
res1, res2 = self.sess.run([var1, var2])
res.append(np.equal(res1, res2).all())
self.assertFalse(np.array(res).all())
def assert_var_list_id_equal(self, var_list1, var_list2):
for var1, var2 in zip(var_list1, var_list2):
self.assertTrue(id(var1) == id(var2))
def assert_var_list_id_no_equal(self, var_list1, var_list2):
for var1, var2 in zip(var_list1, var_list2):
self.assertTrue(id(var1) != id(var2))
class TestWithLogSet(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
try:
shutil.rmtree(GlobalConfig().DEFAULT_LOG_PATH)
except FileNotFoundError:
pass
# os.makedirs(GlobalConfig().DEFAULT_LOG_PATH)
# self.assertFalse(ConsoleLogger().inited_flag)
# self.assertFalse(Logger().inited_flag)
Logger().init(config_or_config_dict=GlobalConfig().DEFAULT_LOG_CONFIG_DICT,
log_path=GlobalConfig().DEFAULT_LOG_PATH,
log_level=GlobalConfig().DEFAULT_LOG_LEVEL)
ConsoleLogger().init(logger_name='console_logger',
to_file_flag=True,
level=GlobalConfig().DEFAULT_LOG_LEVEL,
to_file_name=os.path.join(Logger().log_dir, 'console.log'))
self.assertTrue(ConsoleLogger().inited_flag)
self.assertTrue(Logger().inited_flag)
def tearDown(self):
Logger().reset()
ConsoleLogger().reset()
BaseTestCase.tearDown(self)
self.assertFalse(ConsoleLogger().inited_flag)
self.assertFalse(Logger().inited_flag)
class TestWithAll(TestTensorflowSetup, TestWithLogSet):
def setUp(self):
TestTensorflowSetup.setUp(self)
TestWithLogSet.setUp(self)
def tearDown(self):
TestWithLogSet.tearDown(self)
TestTensorflowSetup.tearDown(self)
class SimpleTestSetup(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
try:
shutil.rmtree(GlobalConfig().DEFAULT_LOG_PATH)
except FileNotFoundError:
pass
os.makedirs(GlobalConfig().DEFAULT_LOG_PATH)
self.assertFalse(ConsoleLogger().inited_flag)
self.assertFalse(Logger().inited_flag)
def tearDown(self):
BaseTestCase.tearDown(self)
| 4,356 | 33.856 | 88 | py |
baconian-project | baconian-project-master/baconian/test/tests/set_up/class_creator.py | import unittest
import tensorflow as tf
from baconian.algo.dynamics.mlp_dynamics_model import ContinuousMLPGlobalDynamicsModel
from baconian.algo.misc.placeholder_input import PlaceholderInput
from baconian.algo.dqn import DQN
from baconian.tf.tf_parameters import ParametersWithTensorflowVariable
from baconian.core.core import Basic, EnvSpec
from baconian.envs.gym_env import make
from baconian.algo.value_func.mlp_q_value import MLPQValueFunction
from baconian.algo.ddpg import DDPG
from baconian.algo.policy import DeterministicMLPPolicy
from baconian.algo.policy import ConstantActionPolicy
from baconian.algo.value_func import MLPVValueFunc
from baconian.algo.policy.normal_distribution_mlp import NormalDistributionMLPPolicy
from baconian.algo.ppo import PPO
from baconian.core.parameters import Parameters, DictConfig
from baconian.algo.mpc import ModelPredictiveControl
from baconian.algo.dynamics.terminal_func.terminal_func import RandomTerminalFunc
from baconian.algo.dynamics.reward_func.reward_func import RandomRewardFunc, CostFunc
from baconian.algo.policy import UniformRandomPolicy
from baconian.core.agent import Agent
from baconian.algo.misc import EpsilonGreedy
from baconian.core.experiment import Experiment
from baconian.core.flow.train_test_flow import TrainTestFlow
from baconian.algo.dyna import Dyna
from baconian.common.schedules import *
from baconian.core.status import *
from baconian.algo.policy.ilqr_policy import iLQRPolicy
from baconian.algo.dynamics.random_dynamics_model import UniformRandomDynamicsModel
from baconian.common.noise import *
from baconian.core.flow.dyna_flow import DynaFlow
from baconian.common.data_pre_processing import *
from baconian.common.sampler.sample_data import TransitionData, TrajectoryData
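# ClassCreatorSetup is the factory mix-in used by the test base classes: each create_* method
# builds one component (envs and EnvSpecs, tf parameter sets, MLP Q/V functions, DQN, DDPG,
# PPO, MPC, Dyna, dynamics models, several policy types, agents, train-test and Dyna flows,
# experiments) with small hard-coded configs and returns the object together with locals(),
# so callers can pull out the intermediate pieces (env, env_spec, ...) they need.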
class Foo(Basic):
def __init__(self):
super().__init__(name='foo')
required_key_dict = dict(var1=1, var2=0.1)
class ClassCreatorSetup(unittest.TestCase):
def create_env(self, env_id):
return make(env_id)
def create_env_spec(self, env):
return EnvSpec(action_space=env.action_space,
obs_space=env.observation_space)
def create_tf_parameters(self, name='test_tf_param'):
with tf.variable_scope(name):
a = tf.get_variable(shape=[3, 4], dtype=tf.float32, name='var_1')
b = tf.get_variable(shape=[3, 4], dtype=tf.bool, name='var_2')
conf = DictConfig(required_key_dict=Foo.required_key_dict,
config_dict=dict(var1=1, var2=0.01))
param = ParametersWithTensorflowVariable(tf_var_list=[a, b],
rest_parameters=dict(var3='sss'),
name=name,
source_config=conf,
require_snapshot=True,
to_ph_parameter_dict=dict(
var1=tf.placeholder(shape=(), dtype=tf.int32)))
return param, locals()
def create_mlp_q_func(self, env_id='Acrobot-v1', name='mlp_q'):
env = make(env_id)
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
mlp_q = MLPQValueFunction(env_spec=env_spec,
name_scope=name,
name=name,
mlp_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 16,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03,
"L1_NORM": 0.2,
"L2_NORM": 0.1
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
])
return mlp_q, locals()
def create_dqn(self, env_id='Acrobot-v1', name='dqn'):
mlp_q, local = self.create_mlp_q_func(env_id, name='{}_mlp_q'.format(name))
env_spec = local['env_spec']
env = local['env']
dqn = DQN(env_spec=env_spec,
config_or_config_dict=dict(REPLAY_BUFFER_SIZE=1000,
GAMMA=0.99,
BATCH_SIZE=10,
LEARNING_RATE=0.001,
TRAIN_ITERATION=1,
DECAY=0.5),
name=name,
value_func=mlp_q)
return dqn, locals()
def create_ph(self, name):
with tf.variable_scope(name):
a = tf.get_variable(shape=[3, 4], dtype=tf.float32, name='var_1')
conf = DictConfig(required_key_dict=Foo.required_key_dict,
config_dict=dict(var1=1, var2=0.01))
param = ParametersWithTensorflowVariable(tf_var_list=[a],
rest_parameters=dict(var3='sss'),
name=name,
source_config=conf,
require_snapshot=True,
to_ph_parameter_dict=dict(
var1=tf.placeholder(shape=(), dtype=tf.int32)))
param.init()
a = PlaceholderInput(parameters=param)
return a, locals()
def create_ddpg(self, env_id='Pendulum-v0', name='ddpg'):
env = make(env_id)
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
mlp_q = MLPQValueFunction(env_spec=env_spec,
name_scope=name + 'mlp_q',
name=name + 'mlp_q',
mlp_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 16,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
])
self.assertTrue(len(mlp_q.parameters('tf_var_list')) == 4)
policy = DeterministicMLPPolicy(env_spec=env_spec,
name_scope=name + 'mlp_policy',
name=name + 'mlp_policy',
mlp_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 16,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": env_spec.flat_action_dim,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
],
reuse=False)
self.assertTrue(len(policy.parameters('tf_var_list')) == 4)
ddpg = DDPG(
env_spec=env_spec,
config_or_config_dict={
"REPLAY_BUFFER_SIZE": 10000,
"GAMMA": 0.999,
"CRITIC_LEARNING_RATE": 0.001,
"ACTOR_LEARNING_RATE": 0.001,
"DECAY": 0.5,
"BATCH_SIZE": 50,
"TRAIN_ITERATION": 1,
"critic_clip_norm": 0.1,
"actor_clip_norm": 0.1,
},
value_func=mlp_q,
policy=policy,
name=name,
replay_buffer=None
)
return ddpg, locals()
def create_mlp_v(self, env_id='Pendulum-v0', name='mlp_v'):
env = make(env_id)
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
mlp_v = MLPVValueFunc(env_spec=env_spec,
name_scope=name + 'mlp_v',
name=name + 'mlp_v',
mlp_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 16,
"L1_NORM": 0.01,
"L2_NORM": 0.01,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
])
return mlp_v, locals()
def create_normal_dist_mlp_policy(self, env_spec, name='norm_dist_p_'):
policy = NormalDistributionMLPPolicy(env_spec=env_spec,
name_scope=name + 'mlp_policy',
name=name + 'mlp_policy',
mlp_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"L1_NORM": 0.01,
"L2_NORM": 0.01,
"N_UNITS": 16,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": env_spec.flat_action_dim,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
],
reuse=False)
return policy, locals()
def create_ppo(self, env_id='Pendulum-v0', name='ppo'):
mlp_v, local = self.create_mlp_v(env_id, name)
env_spec = local['env_spec']
env = local['env']
policy = self.create_normal_dist_mlp_policy(env_spec=env_spec, name=name)[0]
ppo = PPO(
env_spec=env_spec,
config_or_config_dict={
"gamma": 0.995,
"value_func_memory_size": 10000,
"lam": 0.98,
"policy_train_iter": 10,
"value_func_train_iter": 10,
"clipping_range": None,
"beta": 1.0,
"eta": 50,
"log_var_init": -1.0,
"kl_target": 0.003,
"policy_lr": 0.01,
"value_func_lr": 0.01,
"value_func_train_batch_size": 10,
"lr_multiplier": 1.0
},
value_func=mlp_v,
stochastic_policy=policy,
name=name
)
return ppo, locals()
def create_dict_config(self):
a = DictConfig(required_key_dict=Foo.required_key_dict,
config_dict=dict(var1=1, var2=0.1),
cls_name='Foo')
return a, locals()
def create_parameters(self):
parameters = dict(param1='aaaa',
param2=12312,
param3=np.random.random([4, 2]))
source_config, _ = self.create_dict_config()
a = Parameters(parameters=parameters, source_config=source_config,
name='test_params')
return a, locals()
def create_continue_dynamics_model(self, env_id='Acrobot-v1', name='mlp_dyna'):
env = make(env_id)
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
mlp_dyna, _ = self.create_continuous_mlp_global_dynamics_model(env_spec=env_spec, name=name)
return mlp_dyna, locals()
def create_mpc(self, env_id='Acrobot-v1', name='mpc', policy=None, mlp_dyna=None, env_spec=None, env=None):
if mlp_dyna is None:
mlp_dyna, local = self.create_continue_dynamics_model(env_id, name + 'mlp_dyna')
env_spec = local['env_spec']
env = local['env']
policy = policy if policy else UniformRandomPolicy(env_spec=env_spec, name='unp')
algo = ModelPredictiveControl(
dynamics_model=mlp_dyna,
env_spec=env_spec,
config_or_config_dict=dict(
SAMPLED_HORIZON=2,
SAMPLED_PATH_NUM=5,
dynamics_model_train_iter=10
),
name=name,
policy=policy
)
algo.set_terminal_reward_function_for_dynamics_env(terminal_func=RandomTerminalFunc(name='random_p'),
reward_func=RandomRewardFunc('re_fun'))
return algo, locals()
def create_eps(self, env_spec):
return EpsilonGreedy(action_space=env_spec.action_space,
init_random_prob=0.5), locals()
def create_agent(self, algo, env, env_spec, eps=None, name='agent'):
agent = Agent(env=env,
env_spec=env_spec,
algo=algo,
noise_adder=AgentActionNoiseWrapper(noise=OUNoise(),
action_weight_scheduler=LinearScheduler(
t_fn=lambda: get_global_status_collect()(
'TOTAL_AGENT_TRAIN_SAMPLE_COUNT'),
schedule_timesteps=100,
final_p=1.0,
initial_p=0.0),
noise_weight_scheduler=LinearScheduler(
t_fn=lambda: get_global_status_collect()(
'TOTAL_AGENT_TRAIN_SAMPLE_COUNT'),
schedule_timesteps=100,
final_p=0.0,
initial_p=1.0)),
name=name,
algo_saving_scheduler=PeriodicalEventSchedule(
t_fn=lambda: get_global_status_collect()('TOTAL_ENV_STEP_TRAIN_SAMPLE_COUNT'),
trigger_every_step=20,
after_t=10),
exploration_strategy=eps)
return agent, locals()
def create_train_test_flow(self, agent, traj_flag=False):
flow = TrainTestFlow(
train_sample_count_func=lambda: get_global_status_collect()('TOTAL_ENV_STEP_TRAIN_SAMPLE_COUNT'),
config_or_config_dict={
"TEST_EVERY_SAMPLE_COUNT": 10,
"TRAIN_EVERY_SAMPLE_COUNT": 10,
"START_TRAIN_AFTER_SAMPLE_COUNT": 5,
"START_TEST_AFTER_SAMPLE_COUNT": 5,
},
func_dict={
'test': {'func': agent.test,
'args': list(),
'kwargs': dict(sample_count=1),
},
'train': {'func': agent.train,
'args': list(),
'kwargs': dict(),
},
'sample': {'func': agent.sample,
'args': list(),
'kwargs': dict(sample_count=100 if not traj_flag else 1,
env=agent.env,
sample_type='trajectory' if traj_flag else 'transition',
in_which_status='TRAIN',
store_flag=True),
},
}
)
return flow
def create_dyna_flow(self, agent, env):
flow = DynaFlow(
train_sample_count_func=lambda: get_global_status_collect()('TOTAL_AGENT_TRAIN_SAMPLE_COUNT'),
config_or_config_dict={
"TEST_ALGO_EVERY_REAL_SAMPLE_COUNT": 10,
"TEST_DYNAMICS_EVERY_REAL_SAMPLE_COUNT": 10,
"TRAIN_ALGO_EVERY_REAL_SAMPLE_COUNT_FROM_REAL_ENV": 10,
"TRAIN_ALGO_EVERY_REAL_SAMPLE_COUNT_FROM_DYNAMICS_ENV": 10,
"TRAIN_DYNAMICS_EVERY_REAL_SAMPLE_COUNT": 10,
"START_TRAIN_ALGO_AFTER_SAMPLE_COUNT": 1,
"START_TRAIN_DYNAMICS_AFTER_SAMPLE_COUNT": 1,
"START_TEST_ALGO_AFTER_SAMPLE_COUNT": 1,
"START_TEST_DYNAMICS_AFTER_SAMPLE_COUNT": 1,
"WARM_UP_DYNAMICS_SAMPLES": 1
},
func_dict={
'train_algo': {'func': agent.train,
'args': list(),
'kwargs': dict(state='state_agent_training')},
'train_algo_from_synthesized_data': {'func': agent.train,
'args': list(),
'kwargs': dict(state='state_agent_training')},
'train_dynamics': {'func': agent.train,
'args': list(),
'kwargs': dict(state='state_dynamics_training')},
'test_algo': {'func': agent.test,
'args': list(),
'kwargs': dict(sample_count=10)},
'test_dynamics': {'func': agent.algo.test_dynamics,
'args': list(),
'kwargs': dict(sample_count=10, env=env)},
'sample_from_real_env': {'func': agent.sample,
'args': list(),
'kwargs': dict(sample_count=10,
env=agent.env,
in_which_status='TRAIN',
store_flag=True)},
'sample_from_dynamics_env': {'func': agent.sample,
'args': list(),
'kwargs': dict(sample_count=10,
env=agent.algo.dynamics_env,
in_which_status='TRAIN',
store_flag=True)}
}
)
return flow, locals()
def create_exp(self, name, env, agent, flow=None, traj_flag=False):
experiment = Experiment(
tuner=None,
env=env,
agent=agent,
flow=self.create_train_test_flow(agent, traj_flag=traj_flag) if not flow else flow,
name=name + 'experiment_debug'
)
return experiment
def create_dyna(self, env_spec=None, model_free_algo=None, dyanmics_model=None,
name='dyna'):
if not env_spec:
model_free_algo, local = self.create_ddpg()
dyanmics_model, _ = self.create_continuous_mlp_global_dynamics_model(env_spec=local['env_spec'])
env_spec = local['env_spec']
env = local['env']
algo = Dyna(env_spec=env_spec,
name=name,
model_free_algo=model_free_algo,
dynamics_model=dyanmics_model,
config_or_config_dict=dict(
dynamics_model_train_iter=1,
model_free_algo_train_iter=1
))
algo.set_terminal_reward_function_for_dynamics_env(terminal_func=RandomTerminalFunc(),
reward_func=RandomRewardFunc())
return algo, locals()
def create_continuous_mlp_global_dynamics_model(self, env_spec, name='continuous_mlp_global_dynamics_model'):
mlp_dyna = ContinuousMLPGlobalDynamicsModel(
env_spec=env_spec,
name_scope=name,
name=name,
state_input_scaler=RunningStandardScaler(dims=env_spec.flat_obs_dim),
action_input_scaler=RunningStandardScaler(dims=env_spec.flat_action_dim),
output_delta_state_scaler=RunningStandardScaler(dims=env_spec.flat_obs_dim),
learning_rate=0.01,
mlp_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 16,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": env_spec.flat_obs_dim,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
])
return mlp_dyna, locals()
def create_mlp_deterministic_policy(self, env_spec, name='mlp_policy'):
policy = DeterministicMLPPolicy(env_spec=env_spec,
name=name,
name_scope=name,
mlp_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 16,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": env_spec.flat_action_dim,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
],
output_high=None,
output_low=None,
output_norm=None,
input_norm=None,
reuse=False)
return policy, locals()
def create_uniform_policy(self, env_spec, name='uni_policy'):
return UniformRandomPolicy(env_spec=env_spec, name=name), locals()
def create_constant_action_policy(self, env_spec, name='constant_policy'):
return ConstantActionPolicy(env_spec=env_spec,
name=name,
config_or_config_dict=dict(
ACTION_VALUE=np.array(env_spec.action_space.sample()))), locals()
def create_ilqr_policy(self, env_id='Pendulum-v0'):
class DebuggingCostFunc(CostFunc):
def __call__(self, state=None, action=None, new_state=None, **kwargs) -> float:
return float(np.sum(action * action))
env = make(env_id)
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
dyna = UniformRandomDynamicsModel(env_spec=env_spec)
dyna.init()
policy = iLQRPolicy(env_spec=env_spec,
T=50,
delta=0.0005,
iteration=5,
dynamics=dyna,
dynamics_model_train_iter=1,
cost_fn=DebuggingCostFunc())
return policy, locals()
def sample_transition(self, env, count=100):
data = TransitionData(env.env_spec)
st = env.get_state()
for i in range(count):
ac = env.env_spec.action_space.sample()
new_st, re, done, info = env.step(action=ac)
data.append(state=st,
action=ac,
new_state=new_st,
done=done,
reward=re)
return data
def register_global_status_when_test(self, agent, env):
"""
This function should only be called in unit tests.
:param agent:
:param env:
:return:
"""
get_global_status_collect().register_info_key_status(obj=agent,
info_key='predict_counter',
under_status='TRAIN',
return_name='TOTAL_AGENT_TRAIN_SAMPLE_COUNT')
get_global_status_collect().register_info_key_status(obj=agent,
info_key='predict_counter',
under_status='TEST',
return_name='TOTAL_AGENT_TEST_SAMPLE_COUNT')
get_global_status_collect().register_info_key_status(obj=agent,
info_key='update_counter',
under_status='TRAIN',
return_name='TOTAL_AGENT_UPDATE_COUNT')
get_global_status_collect().register_info_key_status(obj=env,
info_key='step',
under_status='TEST',
return_name='TOTAL_ENV_STEP_TEST_SAMPLE_COUNT')
get_global_status_collect().register_info_key_status(obj=env,
info_key='step',
under_status='TRAIN',
return_name='TOTAL_ENV_STEP_TRAIN_SAMPLE_COUNT')
| 29,484 | 48.30602 | 113 | py |
baconian-project | baconian-project-master/baconian/test/tests/set_up/__init__.py | | 0 | 0 | 0 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_tf/test_tf_util.py | import unittest
import numpy as np
from baconian.tf.util import MLPCreator
from baconian.test.tests.set_up.setup import TestTensorflowSetup
import tensorflow as tf
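# MLPCreator tests: building two networks with the same net name under the same variable
# scope with reuse=True must share variables (identical ids, an assignment to one is visible
# through the other); building under a different scope with reuse=False must create
# independent variables; and reuse still holds when the second network is fed a different
# (transformed) input tensor.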
class TestTensorflowUtil(TestTensorflowSetup):
def test_init_with_tf_layers(self):
input_ph = tf.placeholder(dtype=tf.float32, shape=[None, 5], name='ph1')
input_ph2 = tf.placeholder(dtype=tf.float32, shape=[None, 5], name='ph1')
net1_name = 'net'
net1_scope = 'scope'
net2_name = 'net'
net2_scope = 'scope'
net1 = MLPCreator.create_network_with_tf_layers(input=input_ph,
network_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 16,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
],
net_name=net1_name,
reuse=False,
tf_var_scope=net1_scope)
net2 = MLPCreator.create_network_with_tf_layers(input=input_ph,
network_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 16,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
],
net_name=net2_name,
reuse=True,
tf_var_scope=net2_scope)
self.assertGreater(len(net1[2]), 0)
self.assertGreater(len(net2[2]), 0)
self.sess.run(tf.global_variables_initializer())
for var1, var2 in zip(net1[2], net2[2]):
print("net1: {} {} | true name: {}".format(net1_scope, net1_name, var1.name))
print("net2: {} {} | true name: {}".format(net2_scope, net2_name, var2.name))
self.assertEqual(id(var1), id(var2))
self.assertTrue(net1_name in var1.name)
self.assertTrue(net1_scope in var1.name)
self.assertTrue(net2_name in var2.name)
self.assertTrue(net2_scope in var2.name)
var = net1[2][0]
op = tf.assign(var,
tf.constant(value=np.random.random(list(self.sess.run(tf.shape(var)))),
dtype=tf.float32))
self.sess.run(op)
var1 = self.sess.run(var)
var2 = self.sess.run(net2[2][0])
self.assertTrue(np.equal(var1, var2).all())
def test_init_with_tf_layers_2(self):
input_ph = tf.placeholder(dtype=tf.float32, shape=[None, 5], name='ph1')
net1_name = 'net'
net1_scope = 'scope'
net2_name = 'net'
net2_scope = 'scope1'
net1 = MLPCreator.create_network_with_tf_layers(input=input_ph,
network_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 16,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
],
net_name=net1_name,
reuse=False,
tf_var_scope=net1_scope)
net2 = MLPCreator.create_network_with_tf_layers(input=input_ph,
network_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 16,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
],
net_name=net2_name,
reuse=False,
tf_var_scope=net2_scope)
self.assertGreater(len(net1[2]), 0)
self.assertGreater(len(net2[2]), 0)
self.sess.run(tf.global_variables_initializer())
for var1, var2 in zip(net1[2], net2[2]):
print("net1: {} {} | true name: {}".format(net1_scope, net1_name, var1.name))
print("net2: {} {} | true name: {}".format(net2_scope, net2_name, var2.name))
self.assertFalse(id(var1) == id(var2))
self.assertTrue(net1_name in var1.name)
self.assertTrue(net1_scope in var1.name)
self.assertTrue(net2_name in var2.name)
self.assertTrue(net2_scope in var2.name)
var = net1[2][0]
op = tf.assign(var,
tf.constant(value=np.random.random(list(self.sess.run(tf.shape(var)))),
dtype=tf.float32))
self.sess.run(op)
var1 = self.sess.run(var)
var2 = self.sess.run(net2[2][0])
self.assertFalse(np.equal(var1, var2).all())
def test_init_with_tf_layers_3(self):
input_ph = tf.placeholder(dtype=tf.float32, shape=[None, 5], name='ph1')
net1_name = 'net'
net1_scope = 'scope'
net2_name = 'net'
net2_scope = 'scope'
net1 = MLPCreator.create_network_with_tf_layers(input=input_ph,
network_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 16,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
],
net_name=net1_name,
reuse=False,
tf_var_scope=net1_scope)
input_ph2 = tf.placeholder(dtype=tf.float32, shape=[None, 5], name='ph1')
input_ph2 = tf.tanh(input_ph2)
net2 = MLPCreator.create_network_with_tf_layers(input=input_ph2,
network_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 16,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
],
net_name=net2_name,
reuse=True,
tf_var_scope=net2_scope)
self.assertGreater(len(net1[2]), 0)
self.assertGreater(len(net2[2]), 0)
self.assertEqual(len(net1), len(net2))
self.sess.run(tf.global_variables_initializer())
for var1, var2 in zip(net1[2], net2[2]):
print("net1: {} {} | true name: {}".format(net1_scope, net1_name, var1.name))
print("net2: {} {} | true name: {}".format(net2_scope, net2_name, var2.name))
self.assertTrue(id(var1) == id(var2))
self.assertTrue(net1_name in var1.name)
self.assertTrue(net1_scope in var1.name)
self.assertTrue(net2_name in var2.name)
self.assertTrue(net2_scope in var2.name)
var = net1[2][0]
op = tf.assign(var,
tf.constant(value=np.random.random(list(self.sess.run(tf.shape(var)))),
dtype=tf.float32))
self.sess.run(op)
var1 = self.sess.run(var)
var2 = self.sess.run(net2[2][0])
self.assertTrue(np.equal(var1, var2).all())
if __name__ == '__main__':
unittest.main()
| 14,170 | 59.04661 | 94 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_tf/test_tf_paramters.py | import numpy as np
import tensorflow as tf
from baconian.config.global_config import GlobalConfig
from baconian.tf.util import create_new_tf_session
from baconian.test.tests.set_up.setup import TestWithAll
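# ParametersWithTensorflowVariable tests: snapshot save/load and copy_from must reproduce
# both the config-backed and the plain parameter entries, and save()/load() through a
# checkpoint must restore the tf variable values even after the default graph and session
# are torn down and rebuilt.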
class TestTensorflowParameters(TestWithAll):
def test_tf_param(self):
param, _ = self.create_tf_parameters()
param.init()
param.save_snapshot()
param.load_snapshot()
para2, _ = self.create_tf_parameters(name='para2')
para2.init()
para2.copy_from(param)
for key in param._source_config.required_key_dict.keys():
if isinstance(param[key], tf.Tensor):
continue
if isinstance(param[key], np.ndarray):
self.assertTrue(np.equal(param[key], para2[key]).all())
else:
self.assertEqual(param[key], para2[key])
self.assertEqual(param(key), para2(key))
for key in param._parameters.keys():
if isinstance(param[key], tf.Tensor):
continue
if isinstance(param[key], np.ndarray):
self.assertTrue(np.equal(param[key], para2[key]).all())
else:
self.assertEqual(param[key], para2[key])
self.assertEqual(param(key), para2(key))
def test_save_load(self):
param, _ = self.create_tf_parameters('param')
param.init()
var_val = [self.sess.run(var) for var in param('tf_var_list')]
param_other, _ = self.create_tf_parameters(name='other_param')
param_other.init()
for i in range(10):
param.save(sess=self.sess,
save_path=GlobalConfig().DEFAULT_LOG_PATH + '/model',
global_step=i)
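        # Tear down the current graph and session and rebuild them, so load() below has to restore the weights into fresh variables.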
if tf.get_default_session():
sess = tf.get_default_session()
sess.__exit__(None, None, None)
tf.reset_default_graph()
print('set tf device as {}'.format(self.default_id))
import os
os.environ["CUDA_VISIBLE_DEVICES"] = str(self.default_id)
self.sess = create_new_tf_session()
param2, _ = self.create_tf_parameters('param')
param2.init()
param2.load(path_to_model=GlobalConfig().DEFAULT_LOG_PATH + '/model', global_step=9)
for var1, var2 in zip(var_val, param2('tf_var_list')):
self.assertTrue(np.equal(var1, self.sess.run(var2)).all())
| 2,410 | 34.985075 | 92 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_tf/test_placeholder_input.py | from baconian.test.tests.set_up.setup import TestWithAll
from baconian.config.global_config import GlobalConfig
import glob
class TestPlaceholderInput(TestWithAll):
def test_tf_param(self):
a, _ = self.create_ph('test')
for i in range(5):
a.save(save_path=GlobalConfig().DEFAULT_LOG_PATH + '/test_placehoder_input',
global_step=i,
name='a')
file = glob.glob(GlobalConfig().DEFAULT_LOG_PATH + '/test_placehoder_input/a*.meta')
self.assertTrue(len(file) == 5)
b, _ = self.create_ph('b')
b.copy_from(obj=a)
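        # Round trip: copy_from makes b equal to a, re-initialising a makes them diverge, and loading the saved checkpoint restores equality.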
self.assert_var_list_equal(a.parameters('tf_var_list'),
b.parameters('tf_var_list'))
a.parameters.init()
self.assert_var_list_at_least_not_equal(a.parameters('tf_var_list'),
b.parameters('tf_var_list'))
a.load(path_to_model=GlobalConfig().DEFAULT_LOG_PATH + '/test_placehoder_input',
global_step=4,
model_name='a')
self.assert_var_list_equal(a.parameters('tf_var_list'),
b.parameters('tf_var_list'))
def test_save_load_with_dqn(self):
dqn, locals = self.create_dqn()
dqn.init()
for i in range(5):
dqn.save(save_path=GlobalConfig().DEFAULT_LOG_PATH + '/test_placehoder_input', global_step=i, name='dqn')
file = glob.glob(GlobalConfig().DEFAULT_LOG_PATH + '/test_placehoder_input/dqn*.meta')
self.assertTrue(len(file) == 5)
dqn2, _ = self.create_dqn(name='dqn_2')
dqn2.copy_from(dqn)
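        # Same round trip as above, checked on the DQN's parameters and on both its online and target Q-networks.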
self.assert_var_list_equal(dqn.parameters('tf_var_list'), dqn2.parameters('tf_var_list'))
self.assert_var_list_equal(dqn.q_value_func.parameters('tf_var_list'),
dqn2.q_value_func.parameters('tf_var_list'))
self.assert_var_list_equal(dqn.target_q_value_func.parameters('tf_var_list'),
dqn2.target_q_value_func.parameters('tf_var_list'))
dqn.init()
self.assert_var_list_at_least_not_equal(dqn.q_value_func.parameters('tf_var_list'),
dqn2.q_value_func.parameters('tf_var_list'))
self.assert_var_list_at_least_not_equal(dqn.target_q_value_func.parameters('tf_var_list'),
dqn2.target_q_value_func.parameters('tf_var_list'))
dqn.load(path_to_model=GlobalConfig().DEFAULT_LOG_PATH + '/test_placehoder_input', global_step=4,
model_name='dqn')
self.assert_var_list_equal(dqn.parameters('tf_var_list'), dqn2.parameters('tf_var_list'))
self.assert_var_list_equal(dqn.q_value_func.parameters('tf_var_list'),
dqn2.q_value_func.parameters('tf_var_list'))
self.assert_var_list_equal(dqn.target_q_value_func.parameters('tf_var_list'),
dqn2.target_q_value_func.parameters('tf_var_list'))
| 3,061 | 50.033333 | 117 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_tf/test_tf_probability.py | import tensorflow as tf
import tensorflow_probability as tfp
import unittest
from baconian.core.core import EnvSpec
import numpy as np
from baconian.envs.gym_env import make
from baconian.common.special import *
from baconian.test.tests.set_up.setup import TestTensorflowSetup
import baconian.algo.distribution.mvn as mvn
def describe_sample_tensor_shape(sample_shape, distribution):
print('Sample shape:', sample_shape)
print('Returned sample tensor shape:',
distribution.sample(sample_shape).shape)
def describe_sample_tensor_shapes(distributions, sample_shapes):
started = False
for distribution in distributions:
print(distribution)
for sample_shape in sample_shapes:
describe_sample_tensor_shape(sample_shape, distribution)
print()
def kl_entropy_logprob_from_pat_cody(old_mean, old_var, mean, var, sess, action_dim, action_ph, feed_dict):
# logvar = tf.reduce_sum(tf.log(var))
# old_log_var = tf.reduce_sum(tf.log(old_var))
"""
KL(old|new)
:param old_mean:
:param old_var:
:param mean:
:param var:
:param sess:
:param action_dim:
:param action_ph:
:param feed_dict:
:return:
"""
logvar = tf.log(var)
old_log_var = tf.log(old_var)
log_det_cov_old = tf.reduce_sum(old_log_var)
log_det_cov_new = tf.reduce_sum(logvar)
tr_old_new = tf.reduce_sum(tf.exp(old_log_var - logvar))
kl = 0.5 * tf.reduce_mean(log_det_cov_new - log_det_cov_old + tr_old_new +
tf.reduce_sum(tf.square(mean - old_mean) /
tf.exp(logvar), axis=1) -
action_dim)
# kl = 0.5 * (log_det_cov_new - log_det_cov_old + tr_old_new +
# tf.reduce_sum(tf.square(mean - old_mean) /
# tf.exp(logvar), axis=0) -
# action_dim)
entropy = 0.5 * (action_dim * (np.log(2 * np.pi) + 1) +
tf.reduce_sum(logvar))
logp = -0.5 * tf.reduce_sum(tf.log(tf.exp(logvar) * 2 * np.pi))
logp += -0.5 * tf.reduce_sum(tf.square(action_ph - mean) /
tf.exp(logvar), axis=1)
# logp += -0.5 * np.log(2 * np.pi * action_dim)
logp_old = -0.5 * tf.reduce_sum(tf.log(tf.exp(old_log_var) * 2 * np.pi))
logp_old += -0.5 * tf.reduce_sum(tf.square(action_ph - old_mean) /
tf.exp(old_log_var), axis=1)
# logp_old += -0.5 * np.log(2 * np.pi * action_dim)
return sess.run([kl, entropy, logp, logp_old], feed_dict=feed_dict)
def kl_entropy_logprob_from_mvn(old_mean, old_var, mean, var, sess, action_dim, action_ph, feed_dict):
kl = mvn.kl(old_mean, old_var, mean, var, action_dim)
entropy = mvn.entropy(mean, var, action_dim)
logp = mvn.log_prob(action_ph, mean, var)
logp_old = mvn.log_prob(action_ph, old_mean, old_var)
return sess.run([kl, entropy, logp, logp_old], feed_dict=feed_dict)
class TestTFP(TestTensorflowSetup):
def test_init(self):
sess = self.sess
env = make('Pendulum-v0')
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
action_dim = env_spec.flat_action_dim
state_dim = env_spec.flat_obs_dim
# bs_shape = tf.placeholder(dtype=tf.int8, shape=[])
bs_shape = 4
action_ph = tf.placeholder(dtype=tf.float32, shape=[None, action_dim])
state_ph = tf.placeholder(dtype=tf.float32, shape=[None, state_dim])
mean_old = tf.layers.dense(inputs=state_ph,
name='layer1',
units=action_dim)
mean2 = tf.layers.dense(inputs=state_ph,
name='layer2',
units=action_dim)
# mean1 = tf.get_variable(name='mean1', shape=[bs_shape, action_dim], dtype=tf.float32)
var1 = tf.get_variable(name='var1', shape=[action_dim], dtype=tf.float32,
initializer=tf.initializers.random_uniform(0.0, 1.0))
# mean2 = tf.get_variable(name='mean2', shape=[bs_shape, action_dim], dtype=tf.float32)
var2 = tf.get_variable(name='var2', shape=[action_dim], dtype=tf.float32,
initializer=tf.initializers.random_uniform(0.0, 1.0))
# var1 = tf.get_variable('logvars', (10, action_dim), tf.float32,
# tf.constant_initializer(0.0))
# var1 = tf.expand_dims(tf.reduce_sum(var1, axis=0), axis=0)
# var1 = tf.tile(var1, [bs_shape, 1])
#
# var2 = tf.get_variable('logvars2', (10, action_dim), tf.float32,
# tf.constant_initializer(0.0))
# var2 = tf.expand_dims(tf.reduce_sum(var2, axis=0), 0)
# var2 = tf.tile(var2, [bs_shape, 1])
dist_old = tfp.distributions.MultivariateNormalDiag(mean_old, tf.sqrt(var1), validate_args=True)
dis2 = tfp.distributions.MultivariateNormalDiag(mean2, tf.sqrt(var2), validate_args=True)
dist_norm1 = tfp.distributions.Normal(mean_old, var1)
dist_norm2 = tfp.distributions.Normal(mean2, var2)
print(dist_old, dis2)
# dis1 = tfp.distributions.Independent(dis1, reinterpreted_batch_ndims=1)
# dis2 = tfp.distributions.Independent(dis2, reinterpreted_batch_ndims=1)
# op = tf.train.AdamOptimizer(learning_rate=0.1).minimize(tfp.distributions.kl_divergence(dis1, dis2),
# var_list=[mean1, var1])
ac = [env_spec.action_space.sample() for _ in range(bs_shape)]
ac = make_batch(np.array(ac), original_shape=env_spec.action_shape)
state = [env_spec.obs_space.sample() for _ in range(bs_shape)]
state = make_batch(np.array(state), original_shape=env_spec.obs_shape)
feed_dict = {
state_ph: state,
action_ph: ac
}
sess.run(tf.global_variables_initializer())
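        # Cross-check the hand-rolled KL / entropy / log-prob against tensorflow_probability's MultivariateNormalDiag.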
kl, entropy, logp, log_p_old = kl_entropy_logprob_from_pat_cody(old_mean=mean_old,
old_var=var1,
mean=mean2,
var=var2,
feed_dict=feed_dict,
sess=sess,
action_ph=action_ph,
action_dim=action_dim)
kl_tfp = sess.run(tf.reduce_mean(tfp.distributions.kl_divergence(dist_old, dis2)), feed_dict=feed_dict)
entropy_tfp = sess.run(tf.reduce_mean(dis2.entropy()), feed_dict=feed_dict)
log_prob_tfp = sess.run(dis2.log_prob(value=ac), feed_dict=feed_dict)
log_p_old_tfp = sess.run(dist_old.log_prob(value=ac), feed_dict=feed_dict)
test_log_prob_tfp = dis2.log_prob(ac) + tf.cast(0.5 * np.log(2. * np.pi * action_dim), dtype=tf.float32)
test_log_prob_tfp_old = dist_old.log_prob(ac) + tf.cast(0.5 * np.log(2. * np.pi * action_dim), dtype=tf.float32)
print("ac shape {}".format(ac.shape))
print("a sample from dis1 shape {}".format(sess.run(dist_old.sample(), feed_dict=feed_dict).shape))
print("shape of dis under feeddict {}".format(
sess.run([dist_old.batch_shape_tensor(), dist_old.event_shape_tensor()],
feed_dict=feed_dict)))
# print(sess.run(dis2.log_prob(value=ac)).shape)
# print(sess.run(dis1.log_prob(value=ac)).shape)
for i in range(bs_shape):
feed_dict_i = {
state_ph: make_batch(state[i], env_spec.obs_shape),
action_ph: make_batch(ac[i], env_spec.action_shape)
}
print("i dis2 log prob: {}".format(sess.run(dis2.log_prob(value=ac[i]), feed_dict=feed_dict_i)))
print("i dis1 log prob: {}".format(sess.run(dist_old.log_prob(value=ac[i]), feed_dict=feed_dict_i)))
print(kl, kl_tfp)
print(entropy, entropy_tfp)
print(logp, log_prob_tfp)
print(log_p_old, log_p_old_tfp)
print('new log p {}'.format(sess.run(test_log_prob_tfp, feed_dict=feed_dict)))
print('new log p old {}'.format(sess.run(test_log_prob_tfp_old, feed_dict=feed_dict)))
        print('per-dim Normal log p (old) {}'.format(
            sess.run(tf.reduce_sum(dist_norm1.log_prob(ac), axis=1), feed_dict=feed_dict)))
        print('per-dim Normal log p (new) {}'.format(
            sess.run(tf.reduce_sum(dist_norm2.log_prob(ac), axis=1), feed_dict=feed_dict)))
self.assertTrue(np.isclose(logp, log_prob_tfp).all())
self.assertTrue(np.isclose(log_p_old, log_p_old_tfp).all())
self.assertTrue(np.isclose(kl, kl_tfp).all())
self.assertTrue(np.isclose(entropy, entropy_tfp).all())
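        # Repeat the comparison with the project's own mvn helper implementation.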
kl, entropy, logp, log_p_old = kl_entropy_logprob_from_mvn(old_mean=mean_old,
old_var=var1,
mean=mean2,
var=var2,
feed_dict=feed_dict,
sess=sess,
action_ph=action_ph,
action_dim=action_dim)
print(kl, entropy, logp, log_p_old)
self.assertTrue(np.isclose(logp, log_prob_tfp).all())
self.assertTrue(np.isclose(log_p_old, log_p_old_tfp).all())
self.assertTrue(np.isclose(entropy, entropy_tfp).all())
self.assertTrue(np.isclose(kl, kl_tfp).all())
| 10,033 | 46.330189 | 120 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_tf/__init__.py | 0 | 0 | 0 | py |
|
baconian-project | baconian-project-master/baconian/test/tests/test_dynamics/test_gmm_dynamics_prior.py | from baconian.test.tests.set_up.setup import TestWithAll
from baconian.core.core import EnvSpec
from baconian.envs.gym_env import make
from baconian.common.sampler.sample_data import TransitionData
from baconian.algo.policy import UniformRandomPolicy
from baconian.algo.dynamics.gaussian_mixture_dynamics_prior import GaussianMixtureDynamicsPrior
class TestDynamicsPrior(TestWithAll):
def test_update(self):
env = make('Pendulum-v0')
name = 'demo_exp'
env_spec = EnvSpec(obs_space=env.observation_space, action_space=env.action_space)
data = TransitionData(env_spec=env_spec)
policy = UniformRandomPolicy(env_spec=env_spec)
# Do some initial sampling here to train gmm model
st = env.reset()
for i in range(100):
ac = policy.forward(st)
new_st, re, _, _ = env.step(ac)
data.append(state=st, new_state=new_st, action=ac, reward=re, done=False)
st = new_st
gmm = GaussianMixtureDynamicsPrior(env_spec=env_spec, batch_data=data)
gmm.init()
gmm.update(batch_data=data)
def test_prior_eval(self):
env = make('Pendulum-v0')
name = 'demo_exp'
env_spec = EnvSpec(obs_space=env.observation_space, action_space=env.action_space)
data = TransitionData(env_spec=env_spec)
policy = UniformRandomPolicy(env_spec=env_spec)
# Do some initial sampling here to train gmm model
st = env.reset()
for i in range(100):
ac = policy.forward(st)
new_st, re, _, _ = env.step(ac)
data.append(state=st, new_state=new_st, action=ac, reward=re, done=False)
st = new_st
gmm = GaussianMixtureDynamicsPrior(env_spec=env_spec, batch_data=data)
gmm.init()
gmm.update(batch_data=data)
mu0, Phi, m, n0 = gmm.eval(batch_data=data)
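        # The prior is fit over the concatenated [state, action, next_state] vector, so mu0 and Phi must have that combined dimension.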
state_shape = data.state_set.shape[1]
action_shape = data.action_set.shape[1]
self.assertEqual(state_shape + action_shape + state_shape, mu0.shape[0])
self.assertEqual(state_shape + action_shape + state_shape, Phi.shape[0])
self.assertEqual(state_shape + action_shape + state_shape, Phi.shape[1])
| 1,980 | 35.685185 | 95 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_dynamics/test_reward_func.py | from baconian.common.sampler.sample_data import TransitionData
from baconian.test.tests.set_up.setup import TestWithLogSet
import numpy as np
from baconian.algo.dynamics.terminal_func.terminal_func import *
x = 0
def func():
return x
class TestRewardTerminalFunc(TestWithLogSet):
def test_all_reward_func(self):
pass
def test_all_terminal_func(self):
a = FixedEpisodeLengthTerminalFunc(max_step_length=10,
step_count_fn=func)
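        # The terminal function should fire exactly when the externally supplied step counter reaches max_step_length.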
global x
for i in range(11):
if x == 10:
self.assertTrue(a(state=None, action=None, new_state=None))
else:
self.assertFalse(a(state=None, action=None, new_state=None))
x += 1
| 763 | 26.285714 | 76 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_dynamics/test_mlp_dynamics_model.py | from baconian.common.sampler.sample_data import TransitionData
from baconian.test.tests.set_up.setup import TestWithAll
import numpy as np
class TestDynamicsModel(TestWithAll):
def test_mlp_dynamics_model(self):
mlp_dyna, local = self.create_continue_dynamics_model(name='mlp_dyna_model')
env = local['env']
env_spec = local['env_spec']
env.reset()
mlp_dyna.init()
for i in range(100):
mlp_dyna.step(action=np.array(env_spec.action_space.sample()),
state=env_spec.obs_space.sample())
data = TransitionData(env_spec)
st = env.get_state()
for i in range(10):
ac = env_spec.action_space.sample()
new_st, re, done, info = env.step(action=ac)
data.append(state=st,
action=ac,
new_state=new_st,
done=done,
reward=re)
st = new_st
print(mlp_dyna.train(batch_data=data, train_iter=10))
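        # A second, freshly created model must start from different weights; init(source_obj=...) and copy_from(...) should equalise the values while the variable objects stay distinct.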
mlp_dyna_2, _ = self.create_continue_dynamics_model(name='model_2')
mlp_dyna_2.init()
self.assert_var_list_at_least_not_equal(var_list1=mlp_dyna.parameters('tf_var_list'),
var_list2=mlp_dyna_2.parameters('tf_var_list'))
self.assert_var_list_id_no_equal(var_list1=mlp_dyna.parameters('tf_var_list'),
var_list2=mlp_dyna_2.parameters('tf_var_list'))
mlp_dyna_2.init(source_obj=mlp_dyna)
self.assert_var_list_equal(var_list1=mlp_dyna.parameters('tf_var_list'),
var_list2=mlp_dyna_2.parameters('tf_var_list'))
self.assert_var_list_id_no_equal(var_list1=mlp_dyna.parameters('tf_var_list'),
var_list2=mlp_dyna_2.parameters('tf_var_list'))
mlp_dyna_2.copy_from(mlp_dyna)
self.assert_var_list_equal(var_list1=mlp_dyna.parameters('tf_var_list'),
var_list2=mlp_dyna_2.parameters('tf_var_list'))
self.assert_var_list_id_no_equal(var_list1=mlp_dyna.parameters('tf_var_list'),
var_list2=mlp_dyna_2.parameters('tf_var_list'))
| 2,285 | 42.961538 | 95 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_dynamics/test_dynamics_model.py | from baconian.test.tests.set_up.setup import TestWithAll
import numpy as np
from baconian.algo.dynamics.linear_dynamics_model import LinearDynamicsModel, LinearRegressionDynamicsModel
from baconian.common.data_pre_processing import RunningStandardScaler
class TestDynamicsModel(TestWithAll):
def test_dynamics_model(self):
real_env = self.create_env('Pendulum-v0')
x = real_env.observation_space.flat_dim
u = real_env.action_space.flat_dim
a = LinearDynamicsModel(env_spec=real_env.env_spec,
state_transition_matrix=np.ones((x,
x + u)) * 0.01,
bias=np.ones(x) * 0.02)
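        # With all-ones state and action and an all-0.01 transition matrix, every output dimension equals (x + u) * 0.01 plus the 0.02 bias.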
new_state = a.step(action=np.ones_like(real_env.action_space.sample()),
state=np.ones_like(real_env.observation_space.sample()))
print('new state', new_state)
true_new = np.ones([x]) * (x + u) * 0.01 + np.ones([x]) * 0.02
print('true state', true_new)
self.assertTrue(np.equal(true_new, new_state).all())
def test_linear_regression_model(self):
real_env = self.create_env('Pendulum-v0')
real_env.init()
x = real_env.observation_space.flat_dim
u = real_env.action_space.flat_dim
a = LinearRegressionDynamicsModel(env_spec=real_env.env_spec,
state_input_scaler=RunningStandardScaler(
dims=real_env.observation_space.flat_dim),
action_input_scaler=RunningStandardScaler(
dims=real_env.action_space.flat_dim),
state_output_scaler=RunningStandardScaler(
dims=real_env.observation_space.flat_dim))
data = self.sample_transition(env=real_env, count=100)
a.train(batch_data=data)
predict = []
for state, action in zip(data.state_set, data.action_set):
predict.append(a.step(state=state, action=action))
print(np.linalg.norm(np.array(predict) - data.new_state_set, ord=1))
print(np.linalg.norm(np.array(predict) - data.new_state_set, ord=2))
| 2,305 | 52.627907 | 107 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_dynamics/__init__.py | # Date: 3/30/19
# Author: Luke
# Project: baconian-internal | 59 | 19 | 28 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_dynamics/test_gp_dynamics_model.py | from baconian.test.tests.set_up.setup import TestWithAll
import numpy as np
from baconian.algo.dynamics.gaussian_process_dynamiocs_model import GaussianProcessDyanmicsModel
from baconian.common.sampler.sample_data import TransitionData
from baconian.core.core import EnvSpec
import pandas as pd
def get_some_samples(env, num, env_spec, policy):
data = TransitionData(env_spec=env_spec)
st = env.reset()
for i in range(num):
ac = policy.forward(st)
new_st, re, _, _ = env.step(ac)
data.append(state=st, new_state=new_st, action=ac, reward=re, done=False)
st = new_st
return data
class TestDynamicsModel(TestWithAll):
# def test_more(self):
# for i in range(10):
# var = self.test_dynamics_model_in_pendulum()
# for v in var:
# del v
def test_dynamics_model_in_pendulum(self):
env = self.create_env('Pendulum-v0')
env_spec = EnvSpec(obs_space=env.observation_space, action_space=env.action_space)
policy, _ = self.create_uniform_policy(env_spec=env_spec)
policy.allow_duplicate_name = True
data = get_some_samples(env=env, policy=policy, num=100, env_spec=env_spec)
gp = GaussianProcessDyanmicsModel(env_spec=env_spec, batch_data=data)
gp.allow_duplicate_name = True
gp.init()
gp.train()
print("gp first fit")
for i in range(len(data.state_set)):
res = gp.step(action=data.action_set[i],
state=data.state_set[i],
allow_clip=True)
_, var = gp._state_transit(action=data.action_set[i],
state=data.state_set[i],
required_var=True)
print(res)
print(data.new_state_set[i])
print(np.sqrt(var))
try:
self.assertTrue(np.isclose(res,
data.new_state_set[i], atol=1e-2).all())
self.assertTrue(np.greater_equal(data.new_state_set[i], res - 10.0 * np.sqrt(var)).all())
self.assertTrue(np.less_equal(data.new_state_set[i], res + 10.0 * np.sqrt(var)).all())
except Exception as e:
print(e)
print(np.isclose(res, data.new_state_set[i], atol=1e-2).all())
print(np.greater_equal(data.new_state_set[i], res - 10.0 * np.sqrt(var)).all())
print(np.less_equal(data.new_state_set[i], res + 10.0 * np.sqrt(var)).all())
raise e
lengthscales = {}
variances = {}
noises = {}
for i, model in enumerate(gp.mgpr_model.models):
lengthscales['GP' + str(i)] = model.kern.lengthscales.value
variances['GP' + str(i)] = np.array([model.kern.variance.value])
noises['GP' + str(i)] = np.array([model.likelihood.variance.value])
print('-----Learned models------')
pd.set_option('precision', 3)
print('---Lengthscales---')
print(pd.DataFrame(data=lengthscales))
print('---Variances---')
print(pd.DataFrame(data=variances))
print('---Noises---')
print(pd.DataFrame(data=noises))
# re fit the gp
print("gp re fit")
data = get_some_samples(env=env, policy=policy, num=100, env_spec=env_spec)
gp.train(batch_data=data)
for i in range(len(data.state_set)):
res = gp.step(action=data.action_set[i],
state=data.state_set[i],
allow_clip=True)
_, var = gp._state_transit(action=data.action_set[i],
state=data.state_set[i],
required_var=True)
print(res)
print(data.new_state_set[i])
print(np.sqrt(var))
try:
self.assertTrue(np.isclose(res,
data.new_state_set[i], atol=1e-1).all())
self.assertTrue(np.greater_equal(data.new_state_set[i], res - 10.0 * np.sqrt(var)).all())
self.assertTrue(np.less_equal(data.new_state_set[i], res + 10.0 * np.sqrt(var)).all())
except Exception as e:
print(e)
print(np.isclose(res, data.new_state_set[i], atol=1e-1).all())
print(np.greater_equal(data.new_state_set[i], res - 10.0 * np.sqrt(var)).all())
print(np.less_equal(data.new_state_set[i], res + 10.0 * np.sqrt(var)).all())
raise e
# do test
print("gp test")
data = get_some_samples(env=env, policy=policy, num=100, env_spec=env_spec)
for i in range(len(data.state_set)):
res = gp.step(action=data.action_set[i],
state=data.state_set[i],
allow_clip=True)
_, var = gp._state_transit(action=data.action_set[i],
state=data.state_set[i],
required_var=True)
print(res)
print(data.new_state_set[i])
print(np.sqrt(var))
print('l1 loss {}'.format(np.linalg.norm(data.new_state_set[i] - res, 1)))
return locals()
| 5,325 | 43.756303 | 105 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_core/test_core.py | from baconian.config.global_config import GlobalConfig
from baconian.test.tests.set_up.setup import BaseTestCase
from baconian.common.error import *
from baconian.algo.dynamics.reward_func.reward_func import RandomRewardFunc
from baconian.core.core import Basic
class TestCore(BaseTestCase):
def test_global_config(self):
GlobalConfig().set_new_config(config_dict=dict(DEFAULT_BASIC_INIT_STATUS='test'))
assert GlobalConfig().DEFAULT_BASIC_INIT_STATUS == 'test'
GlobalConfig().freeze_flag = True
try:
GlobalConfig().set_new_config(config_dict=dict(DEFAULT_BASIC_INIT_STATUS='test'))
except AttemptToChangeFreezeGlobalConfigError:
pass
else:
raise TypeError
try:
GlobalConfig().set('DEFAULT_LOG_PATH', 'tmp')
except AttemptToChangeFreezeGlobalConfigError:
pass
else:
raise TypeError
try:
GlobalConfig().DEFAULT_LOG_PATH = 'tmp'
except AttemptToChangeFreezeGlobalConfigError:
pass
else:
raise TypeError
GlobalConfig().unfreeze()
def test_config(self):
config, _ = self.create_dict_config()
def test_name_register(self):
a = RandomRewardFunc()
self.assertTrue(a.allow_duplicate_name)
b = RandomRewardFunc()
a = Basic(name='s')
try:
b = Basic(name='s')
except GlobalNameExistedError as e:
pass
else:
raise NotCatchCorrectExceptionError()
| 1,564 | 30.938776 | 93 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_core/test_parameters.py | from baconian.config.global_config import GlobalConfig
from baconian.test.tests.set_up.setup import TestWithLogSet
import numpy as np
from baconian.core.parameters import Parameters
from baconian.common.schedules import LinearScheduler, PiecewiseScheduler, PeriodicalEventSchedule
x = 0
class TestParam(TestWithLogSet):
def test_basic(self):
a, locals = self.create_parameters()
a.save(save_path=GlobalConfig().DEFAULT_LOG_PATH + '/param_path',
name=a.name,
global_step=0)
or_val = a._source_config.config_dict['var1']
or_param = a('param3').copy()
a._source_config.config_dict['var1'] = 100
a._parameters['param3'] = 1000
self.assertNotEqual(a._source_config.config_dict['var1'], or_val)
self.assertFalse(np.equal(a._parameters['param3'], or_param).all())
a.load(load_path=GlobalConfig().DEFAULT_LOG_PATH + '/param_path',
name=a.name,
global_step=0
)
self.assertEqual(a._source_config.config_dict['var1'], or_val)
self.assertTrue(np.equal(a._parameters['param3'], or_param).all())
b, _ = self.create_parameters()
b.copy_from(a)
for key in a._source_config.required_key_dict.keys():
if isinstance(a[key], np.ndarray):
self.assertTrue(np.equal(a[key], b[key]).all())
else:
self.assertEqual(id(a[key]), id(b[key]))
self.assertEqual(id(a(key)), id(b(key)))
for key in a._parameters.keys():
if isinstance(a[key], np.ndarray):
self.assertTrue(np.equal(a[key], b[key]).all())
else:
self.assertEqual(a[key], b[key])
self.assertEqual(a(key), b(key))
def test_scheduler_param(self):
def func():
global x
return x
parameters = dict(param1='aaaa',
param2=1.0,
param4=1.0,
param3=np.random.random([4, 2]))
source_config, _ = self.create_dict_config()
a = Parameters(parameters=parameters,
source_config=source_config,
name='test_params',
to_scheduler_param_tuple=(dict(param_key='param2',
scheduler=LinearScheduler(t_fn=func,
schedule_timesteps=10,
final_p=0.0)),
dict(param_key='param4',
scheduler=PiecewiseScheduler(t_fn=func,
endpoints=(
(2, 0.5), (8, 0.2), (10, 0.0)),
outside_value=0.0,
))))
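        # Expectation: param2 decays linearly from 1.0 to 0.0 over 10 steps of func(); param4 follows the piecewise endpoints, then the outside value.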
a.init()
for i in range(20):
global x
if x < 10:
self.assertEqual(a('param2'), 1.0 - x * (1.0 - 0.0) / 10)
else:
self.assertEqual(a('param2'), 0.0)
if x == 2:
self.assertEqual(a('param4'), 0.5)
if x == 8:
self.assertEqual(a('param4'), 0.2)
if x >= 10:
self.assertEqual(a('param4'), 0.0)
x += 1
b, _ = self.create_parameters()
b.copy_from(a)
for key in a._source_config.required_key_dict.keys():
if isinstance(a[key], np.ndarray):
self.assertTrue(np.equal(a[key], b[key]).all())
else:
self.assertEqual(id(a[key]), id(b[key]))
self.assertEqual(id(a(key)), id(b(key)))
for key in a._parameters.keys():
if isinstance(a[key], np.ndarray):
self.assertTrue(np.equal(a[key], b[key]).all())
else:
self.assertEqual(a[key], b[key])
self.assertEqual(a(key), b(key))
self.assertEqual(a.to_scheduler_param_list.__len__(), b.to_scheduler_param_list.__len__())
for a_val, b_val in zip(a.to_scheduler_param_list, b.to_scheduler_param_list):
self.assertEqual(a_val['param_key'], b_val['param_key'])
self.assertEqual(a_val['scheduler'].value(), b_val['scheduler'].value())
def test_event_schedule(self):
def func():
global x
return x
a = PeriodicalEventSchedule(t_fn=func,
trigger_every_step=5,
after_t=10)
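        # value() should be True only on every 5th counter value once the counter has reached after_t.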
for i in range(100):
if i % 5 != 0 or i < 10:
self.assertFalse(a.value())
else:
self.assertTrue(a.value())
global x
x += 1
| 5,106 | 43.408696 | 117 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_core/test_exp.py | from baconian.test.tests.set_up.setup import BaseTestCase
from baconian.core.experiment_runner import single_exp_runner, duplicate_exp_runner
from baconian.common.schedules import LinearScheduler
from baconian.config.global_config import GlobalConfig
import os
class TestExperiment(BaseTestCase):
def test_experiment(self):
def func():
GlobalConfig().set('DEFAULT_EXPERIMENT_END_POINT', dict(TOTAL_AGENT_TRAIN_SAMPLE_COUNT=200,
TOTAL_AGENT_TEST_SAMPLE_COUNT=None,
TOTAL_AGENT_UPDATE_COUNT=None))
dqn, locals = self.create_dqn()
env_spec = locals['env_spec']
env = locals['env']
agent = self.create_agent(env=locals['env'],
algo=dqn,
name='agent',
eps=self.create_eps(env_spec)[0],
env_spec=env_spec)[0]
exp = self.create_exp(name='model_free', env=env, agent=agent)
exp.run()
single_exp_runner(func, auto_choose_gpu_flag=False, gpu_id=0, del_if_log_path_existed=True)
def test_exp_with_scheduler(self, algo=None, locals=None):
def wrap_algo(algo=None, locals=None):
def func(algo=algo, locals=locals):
GlobalConfig().set('DEFAULT_EXPERIMENT_END_POINT', dict(TOTAL_AGENT_TRAIN_SAMPLE_COUNT=500,
TOTAL_AGENT_TEST_SAMPLE_COUNT=None,
TOTAL_AGENT_UPDATE_COUNT=None))
if not algo:
algo, locals = self.create_dqn()
env_spec = locals['env_spec']
env = locals['env']
agent = self.create_agent(env=locals['env'],
algo=algo,
name='agent',
eps=self.create_eps(env_spec)[0],
env_spec=env_spec)[0]
exp = self.create_exp(name='model_free', env=env, agent=agent)
algo.parameters.set_scheduler(param_key='LEARNING_RATE',
to_tf_ph_flag=True,
scheduler=LinearScheduler(
t_fn=exp.TOTAL_ENV_STEP_TRAIN_SAMPLE_COUNT,
schedule_timesteps=GlobalConfig().DEFAULT_EXPERIMENT_END_POINT[
'TOTAL_AGENT_TRAIN_SAMPLE_COUNT'],
final_p=0.0001,
initial_p=0.01))
exp.run()
self.assertEqual(exp.TOTAL_AGENT_TEST_SAMPLE_COUNT(), exp.TOTAL_ENV_STEP_TEST_SAMPLE_COUNT())
self.assertEqual(exp.TOTAL_AGENT_TRAIN_SAMPLE_COUNT(), exp.TOTAL_ENV_STEP_TRAIN_SAMPLE_COUNT(), 500)
return func
single_exp_runner(wrap_algo(algo, locals), auto_choose_gpu_flag=False, gpu_id=0,
del_if_log_path_existed=True)
def test_duplicate_exp(self):
def func():
GlobalConfig().set('DEFAULT_EXPERIMENT_END_POINT', dict(TOTAL_AGENT_TRAIN_SAMPLE_COUNT=500,
TOTAL_AGENT_TEST_SAMPLE_COUNT=None,
TOTAL_AGENT_UPDATE_COUNT=None))
dqn, locals = self.create_dqn()
env_spec = locals['env_spec']
env = locals['env']
agent = self.create_agent(env=locals['env'],
algo=dqn,
name='agent',
eps=self.create_eps(env_spec)[0],
env_spec=env_spec)[0]
exp = self.create_exp(name='model_free', env=env, agent=agent)
exp.run()
base_path = GlobalConfig().DEFAULT_LOG_PATH
duplicate_exp_runner(2, func, auto_choose_gpu_flag=False, gpu_id=0)
self.assertTrue(os.path.isdir(base_path))
self.assertTrue(os.path.isdir(os.path.join(base_path, 'exp_0')))
self.assertTrue(os.path.isdir(os.path.join(base_path, 'exp_1')))
self.assertTrue(os.path.isdir(os.path.join(base_path, 'exp_0', 'record')))
self.assertTrue(os.path.isdir(os.path.join(base_path, 'exp_1', 'record')))
self.assertTrue(os.path.isfile(os.path.join(base_path, 'exp_0', 'console.log')))
self.assertTrue(os.path.isfile(os.path.join(base_path, 'exp_1', 'console.log')))
def test_saving_scheduler_on_all_model_free_algo(self):
to_test_algo_func = (self.create_ppo, self.create_dqn, self.create_ddpg)
sample_traj_flag = (True, False, False)
for i, func in enumerate(to_test_algo_func):
self.setUp()
single_exp_runner(_saving_scheduler(self, func,
sample_traj_flag=sample_traj_flag[i]),
auto_choose_gpu_flag=False,
gpu_id=0,
del_if_log_path_existed=True)
self.tearDown()
def test_saving_scheduler_on_all_model_based_algo(self):
to_test_algo_func = (self.create_mpc, self.create_dyna)
for func in to_test_algo_func:
self.setUp()
single_exp_runner(_saving_scheduler(self, func), auto_choose_gpu_flag=False,
gpu_id=0, del_if_log_path_existed=True)
self.tearDown()
def _saving_scheduler(self, creat_func=None, sample_traj_flag=False):
def wrap_algo():
def func(self, creat_func=None):
GlobalConfig().set('DEFAULT_EXPERIMENT_END_POINT', dict(TOTAL_AGENT_TRAIN_SAMPLE_COUNT=500,
TOTAL_AGENT_TEST_SAMPLE_COUNT=None,
TOTAL_AGENT_UPDATE_COUNT=None))
if not creat_func:
algo, locals = self.create_dqn()
else:
algo, locals = creat_func()
env_spec = locals['env_spec']
env = locals['env']
agent = self.create_agent(env=locals['env'],
algo=algo,
name='agent',
eps=self.create_eps(env_spec)[0],
env_spec=env_spec)[0]
flow = None
from baconian.algo.dyna import Dyna
if isinstance(algo, Dyna):
flow = self.create_dyna_flow(agent=agent, env=env)[0]
exp = self.create_exp(name='model_free', env=env, agent=agent, flow=flow, traj_flag=sample_traj_flag)
exp.run()
self.assertEqual(exp.TOTAL_AGENT_TEST_SAMPLE_COUNT(), exp.TOTAL_ENV_STEP_TEST_SAMPLE_COUNT())
self.assertEqual(exp.TOTAL_AGENT_TRAIN_SAMPLE_COUNT(), exp.TOTAL_ENV_STEP_TRAIN_SAMPLE_COUNT(), 500)
return func(self, creat_func)
return wrap_algo
| 7,421 | 53.175182 | 116 | py |
baconian-project | baconian-project-master/baconian/test/tests/test_core/__init__.py | 0 | 0 | 0 | py |
|
baconian-project | baconian-project-master/baconian/benchmark/visualisation.py | from baconian.common.log_data_loader import *
if __name__ == "__main__":
MultipleExpLogDataLoader(
exp_root_dir_list='/Users/lukeeeeee/Code/baconian-internal/baconian/benchmark/benchmark_log/Pendulum-v0/ppo/2020-05-05_09-35-28') \
.plot_res(sub_log_dir_name='benchmark_agent/TEST',
key='sum_reward',
index='sample_counter',
mode='line',
average_over=1,
file_name=None,
save_format='png',
save_path='./')
| 552 | 35.866667 | 139 | py |
baconian-project | baconian-project-master/baconian/benchmark/__init__.py | 0 | 0 | 0 | py |
|
baconian-project | baconian-project-master/baconian/benchmark/get_result.py | import glob
import os
import json_tricks as json
import numpy as np
def get_reward_json(root_dir_list, sub_dir, key, index_key):
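    # For every experiment directory, print the last five logged values of `key`, then report their mean averaged across runs.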
all_res = []
for rt in root_dir_list:
with open(file=os.path.join(rt, sub_dir)) as f:
res = json.load(f)[key]
val = [rr['value'] for rr in res]
index = [rr[index_key] for rr in res]
all_res.append((val, index))
print(val[-5:], rt)
aver = 0.0
for re in all_res:
aver += float(np.mean(re[0][-5:]))
aver /= len(all_res)
print(aver)
if __name__ == '__main__':
get_reward_json(
root_dir_list=glob.glob('/home/dls/CAP/baconian-internal/benchmark/benchmark_log/Pendulum-v0/dyna/**/*'),
sub_dir='record/benchmark_agent/TEST/log.json',
key='sum_reward',
index_key='predict_counter'
)
| 849 | 26.419355 | 113 | py |
baconian-project | baconian-project-master/baconian/benchmark/run_benchmark.py | from baconian.benchmark.ddpg_benchmark import mountiancar_task_fn, pendulum_task_fn
from baconian.benchmark.dyna_benchmark import dyna_pendulum_task_fn
from baconian.benchmark.mpc_benchmark import mpc_pendulum_task_fn
from baconian.benchmark.ppo_benchmark import inverted_double_pendulum_bullet_env_task_fn, \
inverted_pendulum_bullet_env_task_fn, pendulum_env_task_fn, half_cheetah_bullet_env_task_fn
from baconian.benchmark.iLQR_benchmark import ilqr_pendulum_task_fn
from baconian.benchmark.dqn_benchmark import acrobot_task_fn, lunarlander_task_fn
import argparse
import os
import time
from baconian.config.global_config import GlobalConfig
from baconian.core.experiment_runner import duplicate_exp_runner
arg = argparse.ArgumentParser()
env_id_to_task_fn = {
'Pendulum-v0': {
'ddpg': pendulum_task_fn,
'dyna': dyna_pendulum_task_fn,
'mpc': mpc_pendulum_task_fn,
'ppo': pendulum_env_task_fn,
'ilqr': ilqr_pendulum_task_fn
},
'MountainCarContinuous-v0': {
'ddpg': mountiancar_task_fn,
},
'HalfCheetahBulletEnv-v0': {
'ppo': half_cheetah_bullet_env_task_fn,
},
'Acrobot-v1': {
'dqn': acrobot_task_fn,
},
'LunarLander-v2': {
'dqn': lunarlander_task_fn,
},
'InvertedPendulumBulletEnv-v0': {
'ppo': inverted_pendulum_bullet_env_task_fn
},
'InvertedDoublePendulumBulletEnv-v0': {
'ppo': inverted_double_pendulum_bullet_env_task_fn
}
}
alog_list = ['ddpg', 'dyna', 'mpc', 'ppo', 'ilqr', 'dqn']
arg.add_argument('--env_id', type=str, choices=list(env_id_to_task_fn.keys()))
arg.add_argument('--algo', type=str, choices=alog_list)
arg.add_argument('--count', type=int, default=1)
arg.add_argument('--cuda_id', type=int, default=-1)
args = arg.parse_args()
if __name__ == '__main__':
CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
GlobalConfig().set('DEFAULT_LOG_PATH', os.path.join(CURRENT_PATH, 'benchmark_log', args.env_id, args.algo,
time.strftime("%Y-%m-%d_%H-%M-%S")))
ExpRootPath = GlobalConfig().DEFAULT_LOG_PATH
duplicate_exp_runner(args.count, env_id_to_task_fn[args.env_id][args.algo], gpu_id=args.cuda_id)
| 2,249 | 37.793103 | 110 | py |
baconian-project | baconian-project-master/baconian/benchmark/mpc_benchmark/pendulum.py | """
MPC benchmark on Pendulum
"""
from baconian.benchmark.mpc_benchmark.pendulum_conf import *
from baconian.algo.dynamics.terminal_func.terminal_func import FixedEpisodeLengthTerminalFunc
from baconian.core.flow.dyna_flow import DynaFlow
from baconian.envs.envs_reward_func import REWARD_FUNC_DICT
from baconian.core.core import EnvSpec
from baconian.envs.gym_env import make
from baconian.core.agent import Agent
from baconian.core.experiment import Experiment
from baconian.algo.mpc import ModelPredictiveControl
from baconian.algo.policy import UniformRandomPolicy
from baconian.algo.dynamics.mlp_dynamics_model import ContinuousMLPGlobalDynamicsModel
from baconian.config.global_config import GlobalConfig
from baconian.core.status import get_global_status_collect
def pendulum_task_fn():
exp_config = PENDULUM_BENCHMARK_CONFIG_DICT
GlobalConfig().set('DEFAULT_EXPERIMENT_END_POINT',
exp_config['DEFAULT_EXPERIMENT_END_POINT'])
env = make('Pendulum-v0')
name = 'benchmark'
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
mlp_dyna = ContinuousMLPGlobalDynamicsModel(
env_spec=env_spec,
name_scope=name + '_mlp_dyna',
name=name + '_mlp_dyna',
**exp_config['DynamicsModel']
)
algo = ModelPredictiveControl(
dynamics_model=mlp_dyna,
env_spec=env_spec,
config_or_config_dict=exp_config['MPC'],
name=name + '_mpc',
policy=UniformRandomPolicy(env_spec=env_spec, name='uni_policy')
)
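    # MPC plans by rolling sampled action sequences through the learned dynamics model, so the dynamics env is given the real Pendulum reward and a fixed-episode-length terminal function below.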
algo.set_terminal_reward_function_for_dynamics_env(reward_func=REWARD_FUNC_DICT['Pendulum-v0'](),
terminal_func=FixedEpisodeLengthTerminalFunc(
max_step_length=env.unwrapped._max_episode_steps,
step_count_fn=algo.dynamics_env.total_step_count_fn), )
agent = Agent(env=env, env_spec=env_spec,
algo=algo,
exploration_strategy=None,
noise_adder=None,
name=name + '_agent')
flow = DynaFlow(
train_sample_count_func=lambda: get_global_status_collect()('TOTAL_AGENT_TRAIN_SAMPLE_COUNT'),
config_or_config_dict=exp_config['DynaFlow'],
func_dict={
'train_dynamics': {'func': agent.train,
'args': list(),
'kwargs': dict()},
'train_algo': None,
'test_algo': {'func': agent.test,
'args': list(),
'kwargs': dict(sample_count=1)},
'test_dynamics': {'func': agent.algo.test_dynamics,
'args': list(),
'kwargs': dict(sample_count=100, env=env)},
'sample_from_real_env': {'func': agent.sample,
'args': list(),
'kwargs': dict(sample_count=10,
env=agent.env,
in_which_status='TRAIN',
store_flag=True)},
'sample_from_dynamics_env': None,
'train_algo_from_synthesized_data': None
}
)
experiment = Experiment(
tuner=None,
env=env,
agent=agent,
flow=flow,
name=name
)
experiment.run()
| 3,572 | 40.546512 | 114 | py |
baconian-project | baconian-project-master/baconian/benchmark/mpc_benchmark/pendulum_conf.py | PENDULUM_BENCHMARK_CONFIG_DICT = {
'env_id': "Pendulum-v0",
'DEFAULT_EXPERIMENT_END_POINT': dict(TOTAL_AGENT_TRAIN_SAMPLE_COUNT=10000,
TOTAL_AGENT_TEST_SAMPLE_COUNT=None,
TOTAL_AGENT_UPDATE_COUNT=None),
'DynaFlow': {
"TEST_ALGO_EVERY_REAL_SAMPLE_COUNT": 200,
"TEST_DYNAMICS_EVERY_REAL_SAMPLE_COUNT": 200,
"TRAIN_DYNAMICS_EVERY_REAL_SAMPLE_COUNT": 10,
"START_TRAIN_ALGO_AFTER_SAMPLE_COUNT": 1,
"START_TRAIN_DYNAMICS_AFTER_SAMPLE_COUNT": 1,
"START_TEST_ALGO_AFTER_SAMPLE_COUNT": 1,
"START_TEST_DYNAMICS_AFTER_SAMPLE_COUNT": 1,
"WARM_UP_DYNAMICS_SAMPLES": 2000,
"TRAIN_ALGO_EVERY_REAL_SAMPLE_COUNT_FROM_REAL_ENV": 10,
"TRAIN_ALGO_EVERY_REAL_SAMPLE_COUNT_FROM_DYNAMICS_ENV": 50,
},
'DynamicsModel': dict(learning_rate=0.01,
mlp_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 32,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "2",
"N_UNITS": 64,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "IDENTITY",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 3,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
]),
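    # The dynamics net's output width (3) matches Pendulum-v0's observation dimension.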
'MPC': dict(
SAMPLED_HORIZON=5,
SAMPLED_PATH_NUM=20,
dynamics_model_train_iter=10
),
}
| 2,219 | 42.529412 | 78 | py |
baconian-project | baconian-project-master/baconian/benchmark/mpc_benchmark/__init__.py | from baconian.benchmark.mpc_benchmark.pendulum import pendulum_task_fn as mpc_pendulum_task_fn
| 95 | 47 | 94 | py |
baconian-project | baconian-project-master/baconian/benchmark/iLQR_benchmark/pendulum.py | """
iLQR benchmark on Pendulum
"""
from baconian.benchmark.iLQR_benchmark.pendulum_conf import *
from baconian.core.flow.dyna_flow import DynaFlow
from baconian.envs.envs_reward_func import REWARD_FUNC_DICT
from baconian.core.core import EnvSpec
from baconian.envs.gym_env import make
from baconian.core.agent import Agent
from baconian.core.experiment import Experiment
from baconian.algo.dynamics.mlp_dynamics_model import ContinuousMLPGlobalDynamicsModel
from baconian.config.global_config import GlobalConfig
from baconian.core.status import get_global_status_collect
from baconian.algo.policy.ilqr_policy import iLQRPolicy, iLQRAlogWrapper
from baconian.algo.dynamics.reward_func.reward_func import RewardFuncCostWrapper
from baconian.algo.dynamics.dynamics_model import DynamicsEnvWrapper
from baconian.algo.dynamics.terminal_func.terminal_func import FixedEpisodeLengthTerminalFunc
def pendulum_task_fn():
exp_config = PENDULUM_BENCHMARK_CONFIG_DICT
GlobalConfig().set('DEFAULT_EXPERIMENT_END_POINT',
exp_config['DEFAULT_EXPERIMENT_END_POINT'])
env = make('Pendulum-v0')
name = 'benchmark'
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
mlp_dyna = ContinuousMLPGlobalDynamicsModel(
env_spec=env_spec,
name_scope=name + '_mlp_dyna',
name=name + '_mlp_dyna',
**exp_config['DynamicsModel']
)
dyna_env = DynamicsEnvWrapper(mlp_dyna)
dyna_env.set_terminal_reward_func(
terminal_func=FixedEpisodeLengthTerminalFunc(max_step_length=env.unwrapped._max_episode_steps,
step_count_fn=dyna_env.total_step_count_fn),
reward_func=REWARD_FUNC_DICT['Pendulum-v0']())
policy = iLQRPolicy(env_spec=env_spec,
**exp_config['ILQR'],
dynamics=dyna_env,
cost_fn=RewardFuncCostWrapper(reward_func=REWARD_FUNC_DICT['Pendulum-v0']()))
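    # iLQR optimises an action sequence directly on the learned dynamics (wrapped as an env above); its cost function is derived from the Pendulum reward via RewardFuncCostWrapper.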
algo = iLQRAlogWrapper(policy=policy,
env_spec=env_spec,
dynamics_env=dyna_env)
agent = Agent(env=env, env_spec=env_spec,
algo=algo,
exploration_strategy=None,
noise_adder=None,
name=name + '_agent')
flow = DynaFlow(
train_sample_count_func=lambda: get_global_status_collect()('TOTAL_AGENT_TRAIN_SAMPLE_COUNT'),
config_or_config_dict=exp_config['DynaFlow'],
func_dict={
'train_dynamics': {'func': agent.train,
'args': list(),
'kwargs': dict(state='state_dynamics_training')},
'train_algo': None,
'test_algo': {'func': agent.test,
'args': list(),
'kwargs': dict(sample_count=1)},
'test_dynamics': {'func': agent.algo.test_dynamics,
'args': list(),
'kwargs': dict(sample_count=100, env=env)},
'sample_from_real_env': {'func': agent.sample,
'args': list(),
'kwargs': dict(sample_count=10,
env=agent.env,
in_which_status='TRAIN',
store_flag=True)},
'sample_from_dynamics_env': None,
'train_algo_from_synthesized_data': None
}
)
experiment = Experiment(
tuner=None,
env=env,
agent=agent,
flow=flow,
name=name
)
experiment.run()
| 3,758 | 40.307692 | 102 | py |
baconian-project | baconian-project-master/baconian/benchmark/iLQR_benchmark/pendulum_conf.py | PENDULUM_BENCHMARK_CONFIG_DICT = {
'env_id': "Pendulum-v0",
'DEFAULT_EXPERIMENT_END_POINT': dict(TOTAL_AGENT_TRAIN_SAMPLE_COUNT=10000,
TOTAL_AGENT_TEST_SAMPLE_COUNT=None,
TOTAL_AGENT_UPDATE_COUNT=None),
'DynaFlow': {
"TEST_ALGO_EVERY_REAL_SAMPLE_COUNT": 2000,
"TEST_DYNAMICS_EVERY_REAL_SAMPLE_COUNT": 200,
"TRAIN_DYNAMICS_EVERY_REAL_SAMPLE_COUNT": 10,
"START_TRAIN_ALGO_AFTER_SAMPLE_COUNT": 1,
"START_TRAIN_DYNAMICS_AFTER_SAMPLE_COUNT": 1,
"START_TEST_ALGO_AFTER_SAMPLE_COUNT": 1,
"START_TEST_DYNAMICS_AFTER_SAMPLE_COUNT": 1,
"WARM_UP_DYNAMICS_SAMPLES": 2000,
"TRAIN_ALGO_EVERY_REAL_SAMPLE_COUNT_FROM_REAL_ENV": 10,
"TRAIN_ALGO_EVERY_REAL_SAMPLE_COUNT_FROM_DYNAMICS_ENV": 50,
},
'DynamicsModel': dict(learning_rate=0.01,
mlp_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 32,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "2",
"N_UNITS": 64,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "IDENTITY",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 3,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
]),
'ILQR': dict(
T=10,
delta=0.05,
iteration=2,
dynamics_model_train_iter=10
),
}
| 2,221 | 40.924528 | 78 | py |
baconian-project | baconian-project-master/baconian/benchmark/iLQR_benchmark/__init__.py | from baconian.benchmark.iLQR_benchmark.pendulum import pendulum_task_fn as ilqr_pendulum_task_fn
| 97 | 48 | 96 | py |
baconian-project | baconian-project-master/baconian/benchmark/ppo_benchmark/mujoco_bullet_env_conf.py | import numpy as np
def make_config(obs_dim, action_dim, policy_hid1_multi, value_hid3_size, value_hid1_multi, episode_per_sample,
total_episode, episode_length=1000):
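    # One extra observation input: the benchmark appends a time-step feature to the raw observation (see StepObservationWrapper / use_time_index_flag in mujoco_bullet_env.py).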
OBS_DIM = obs_dim + 1
HID1_MULT = value_hid1_multi
HID3_SIZE = value_hid3_size
HID1_SIZE = OBS_DIM * HID1_MULT
HID2_SIZE = int(np.sqrt(HID1_SIZE * HID3_SIZE))
POLICY_HID_MULTI = policy_hid1_multi
ACT_DIM = action_dim
POLICY_HID3_SIZE = ACT_DIM * 10
POLICY_HID1_SIZE = OBS_DIM * POLICY_HID_MULTI
POLICY_HID2_SIZE = int(np.sqrt(POLICY_HID1_SIZE * POLICY_HID3_SIZE))
CONFIG_DICT = {
'env_id': "HalfCheetahBulletEnv-v0",
'MLP_V': {
'mlp_config': [
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": HID1_SIZE,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": np.sqrt(1 / OBS_DIM)
},
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "2",
"N_UNITS": HID2_SIZE,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": np.sqrt(1 / HID1_SIZE)
},
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "3",
"N_UNITS": HID3_SIZE,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": np.sqrt(1 / HID2_SIZE)
},
{
"ACT": "IDENTITY",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": np.sqrt(1 / HID3_SIZE),
}
]
},
'POLICY': {
'mlp_config': [
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": POLICY_HID1_SIZE,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": np.sqrt(1 / OBS_DIM)
},
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "2",
"N_UNITS": POLICY_HID2_SIZE,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": np.sqrt(1 / POLICY_HID1_SIZE)
},
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "3",
"N_UNITS": POLICY_HID3_SIZE,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": np.sqrt(1 / POLICY_HID2_SIZE)
},
{
"ACT": "IDENTITY",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": ACT_DIM,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": np.sqrt(1 / POLICY_HID3_SIZE)
}
]
},
'PPO': {
'config_or_config_dict': {
"value_func_memory_size": int(total_episode * episode_length / 10),
"gamma": 0.995,
"lam": 0.98,
"policy_train_iter": 20,
"value_func_train_iter": 10,
"clipping_range": None,
"beta": 1.0,
"eta": 50,
"log_var_init": -1.0,
"kl_target": 0.003,
"policy_lr": 9e-4 / np.sqrt(POLICY_HID2_SIZE),
"value_func_lr": 1e-2 / np.sqrt(HID2_SIZE),
"value_func_train_batch_size": 256,
"lr_multiplier": 1.0
}
},
'TrainTestFlow': {
"TEST_SAMPLES_COUNT": 5,
"TRAIN_SAMPLES_COUNT": episode_per_sample,
'config_or_config_dict': {
"TEST_EVERY_SAMPLE_COUNT": 1,
"TRAIN_EVERY_SAMPLE_COUNT": 1,
"START_TRAIN_AFTER_SAMPLE_COUNT": 0,
"START_TEST_AFTER_SAMPLE_COUNT": 0,
}
},
'DEFAULT_EXPERIMENT_END_POINT': dict(
TOTAL_AGENT_TRAIN_SAMPLE_FUNC_COUNT=int(total_episode / episode_per_sample),
TOTAL_AGENT_TEST_SAMPLE_COUNT=None,
TOTAL_AGENT_UPDATE_COUNT=None),
}
return CONFIG_DICT
| 4,473 | 34.507937 | 110 | py |
baconian-project | baconian-project-master/baconian/benchmark/ppo_benchmark/__init__.py | # from baconian.benchmark.ppo_benchmark.halfCheetah import half_cheetah_task_fn
# from baconian.benchmark.ppo_benchmark.pendulum import pendulum_task_fn
# from baconian.benchmark.ppo_benchmark.reacher import reacher_task_fn
# from baconian.benchmark.ppo_benchmark.swimmer import swimmer_task_fn
# from baconian.benchmark.ppo_benchmark.hopper import hopper_task_fn
# from baconian.benchmark.ppo_benchmark.inverted_pendulum import inverted_pendulum_task_fn
# from baconian.benchmark.ppo_benchmark.halfCheetah_pybullet import \half_cheetah_task_fn as half_cheetah_bullet_env_task_fn
from baconian.benchmark.ppo_benchmark.mujoco_bullet_env import half_cheetah_bullet_env_task_fn, \
inverted_pendulum_bullet_env_task_fn, inverted_double_pendulum_bullet_env_task_fn, pendulum_env_task_fn
| 786 | 77.7 | 124 | py |
baconian-project | baconian-project-master/baconian/benchmark/ppo_benchmark/mujoco_bullet_env.py | """
PPO benchmarks on the PyBullet control tasks (HalfCheetah, InvertedPendulum, InvertedDoublePendulum) and Pendulum-v0
"""
from baconian.core.core import EnvSpec
from baconian.envs.gym_env import make
from baconian.algo.value_func import MLPVValueFunc
from baconian.algo.ppo import PPO
from baconian.algo.policy.normal_distribution_mlp import NormalDistributionMLPPolicy
from baconian.core.agent import Agent
from baconian.core.experiment import Experiment
from baconian.core.flow.train_test_flow import TrainTestFlow
from baconian.config.global_config import GlobalConfig
from baconian.core.status import get_global_status_collect
from baconian.envs.env_wrapper import StepObservationWrapper
from baconian.benchmark.ppo_benchmark.mujoco_bullet_env_conf import make_config
def make_task_fn(env_id, total_episode, episode_per_sample):
def func():
env = make(env_id)
exp_config = make_config(obs_dim=env.env_spec.flat_obs_dim,
action_dim=env.env_spec.flat_action_dim,
policy_hid1_multi=10,
value_hid3_size=5,
value_hid1_multi=10,
total_episode=total_episode,
episode_length=1000,
episode_per_sample=episode_per_sample)
GlobalConfig().set('DEFAULT_EXPERIMENT_END_POINT',
exp_config['DEFAULT_EXPERIMENT_END_POINT'])
env.reset()
env = StepObservationWrapper(env, step_limit=env.unwrapped._max_episode_steps)
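        # Wrap the env so each observation carries a step-count feature; make_config above already sized the networks for this extra input.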
name = 'benchmark'
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
mlp_v = MLPVValueFunc(env_spec=env_spec,
name_scope=name + 'mlp_v',
name=name + 'mlp_v',
**exp_config['MLP_V'])
policy = NormalDistributionMLPPolicy(env_spec=env_spec,
name_scope=name + 'mlp_policy',
name=name + 'mlp_policy',
**exp_config['POLICY'],
output_low=env_spec.action_space.low,
output_high=env_spec.action_space.high,
reuse=False)
ppo = PPO(
env_spec=env_spec,
**exp_config['PPO'],
value_func=mlp_v,
stochastic_policy=policy,
name=name + '_ppo',
use_time_index_flag=True
)
agent = Agent(env=env,
env_spec=env_spec,
algo=ppo,
exploration_strategy=None,
noise_adder=None,
name=name + '_agent')
flow = TrainTestFlow(
train_sample_count_func=lambda: get_global_status_collect()('TOTAL_AGENT_TRAIN_SAMPLE_FUNC_COUNT'),
config_or_config_dict=exp_config['TrainTestFlow']['config_or_config_dict'],
func_dict={
'test': {'func': agent.test,
'args': list(),
'kwargs': dict(sample_count=exp_config['TrainTestFlow']['TEST_SAMPLES_COUNT']),
},
'train': {'func': agent.train,
'args': list(),
'kwargs': dict(),
},
'sample': {'func': agent.sample,
'args': list(),
'kwargs': dict(sample_count=exp_config['TrainTestFlow']['TRAIN_SAMPLES_COUNT'],
env=agent.env,
sample_type='trajectory',
in_which_status='TRAIN',
store_flag=True),
},
})
experiment = Experiment(
tuner=None,
env=env,
agent=agent,
flow=flow,
name=name
)
experiment.run()
return func
half_cheetah_bullet_env_task_fn = make_task_fn(env_id='HalfCheetahBulletEnv-v0', total_episode=5000,
episode_per_sample=5)
inverted_pendulum_bullet_env_task_fn = make_task_fn(env_id='InvertedPendulumBulletEnv-v0', total_episode=1000,
episode_per_sample=20)
inverted_double_pendulum_bullet_env_task_fn = make_task_fn(env_id='InvertedDoublePendulumBulletEnv-v0',
total_episode=5000, episode_per_sample=20)
pendulum_env_task_fn = make_task_fn(env_id='Pendulum-v0', total_episode=1000, episode_per_sample=20)
| 4,866 | 43.651376 | 111 | py |
baconian-project | baconian-project-master/baconian/benchmark/ddpg_benchmark/maxmountaincar.py | """
Calculate the maximum sum reward across multiple trials of the MountainCar experiment
"""
import json
# Declare number of trials here (Example: if numTrials is 10, it will search through trial 1-10)
numTrials = 10
maxRewardArray = []
for i in range(numTrials):
with open('./mountain_log_path' + str(i+1) + '/record/benchmark_agent/TEST/log.json', 'r') as f:
result_dict = json.load(f)
x = []
for result in result_dict["sum_reward"]:
x.append(result["value"]["__ndarray__"])
print("TESTSET: " + str(i+1) + " - MAX:", max(x))
maxRewardArray.append(max(x))
print("-----AVERAGE OF 10 Test Sets: ", max(maxRewardArray), "-------")
| 674 | 24.961538 | 100 | py |
baconian-project | baconian-project-master/baconian/benchmark/ddpg_benchmark/pendulum.py | """
DDPG benchmark on Pendulum
"""
from baconian.core.core import EnvSpec
from baconian.envs.gym_env import make
from baconian.algo.value_func.mlp_q_value import MLPQValueFunction
from baconian.algo.ddpg import DDPG
from baconian.algo.policy import DeterministicMLPPolicy
from baconian.core.agent import Agent
from baconian.core.experiment import Experiment
from baconian.core.flow.train_test_flow import TrainTestFlow
from baconian.benchmark.ddpg_benchmark.pendulum_conf import *
from baconian.core.status import get_global_status_collect
from baconian.common.noise import *
from baconian.common.schedules import *
def pendulum_task_fn():
exp_config = PENDULUM_BENCHMARK_CONFIG_DICT
GlobalConfig().set('DEFAULT_EXPERIMENT_END_POINT',
exp_config['DEFAULT_EXPERIMENT_END_POINT'])
env = make('Pendulum-v0')
name = 'benchmark'
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
mlp_q = MLPQValueFunction(env_spec=env_spec,
name_scope=name + '_mlp_q',
name=name + '_mlp_q',
**exp_config['MLPQValueFunction'])
policy = DeterministicMLPPolicy(env_spec=env_spec,
name_scope=name + '_mlp_policy',
name=name + '_mlp_policy',
output_low=env_spec.action_space.low,
output_high=env_spec.action_space.high,
**exp_config['DeterministicMLPPolicy'],
reuse=False)
ddpg = DDPG(
env_spec=env_spec,
policy=policy,
value_func=mlp_q,
name=name + '_ddpg',
**exp_config['DDPG']
)
agent = Agent(env=env, env_spec=env_spec,
algo=ddpg,
exploration_strategy=None,
noise_adder=AgentActionNoiseWrapper(noise=OrnsteinUhlenbeckActionNoise(np.zeros(1,), 0.15),
noise_weight_scheduler=ConstantScheduler(value=0.3),
action_weight_scheduler=ConstantScheduler(value=1.0)),
name=name + '_agent')
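    # Exploration: Ornstein-Uhlenbeck noise, scaled by a constant weight of 0.3, is added to the deterministic policy's actions during training.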
flow = TrainTestFlow(train_sample_count_func=lambda: get_global_status_collect()('TOTAL_AGENT_TRAIN_SAMPLE_COUNT'),
config_or_config_dict=exp_config['TrainTestFlow']['config_or_config_dict'],
func_dict={
'test': {'func': agent.test,
'args': list(),
'kwargs': dict(sample_count=exp_config['TrainTestFlow']['TEST_SAMPLES_COUNT']),
},
'train': {'func': agent.train,
'args': list(),
'kwargs': dict(),
},
'sample': {'func': agent.sample,
'args': list(),
'kwargs': dict(sample_count=exp_config['TrainTestFlow']['TRAIN_SAMPLES_COUNT'],
env=agent.env,
in_which_status='TRAIN',
store_flag=True),
},
})
experiment = Experiment(
tuner=None,
env=env,
agent=agent,
flow=flow,
name=name
)
experiment.run()
if __name__ == "__main__":
from baconian.core.experiment_runner import *
    GlobalConfig().set('DEFAULT_LOG_PATH', './pendulum_log_path')
single_exp_runner(pendulum_task_fn, del_if_log_path_existed=True)
| 3,952 | 42.43956 | 119 | py |
baconian-project | baconian-project-master/baconian/benchmark/ddpg_benchmark/average_calculator.py | """
Calculate the average sum reward across multiple trials, using the last numSamples test results of each experiment
"""
import json
import collections
# Declare the number of tests results to sample from (takes results starting from the end of experiments)
numSamples = 100
sumRewardArray = []
for i in range(10):
with open('./pendulum_log_path_new' + str(i+1) + '/record/benchmark_agent/TEST/log.json', 'r') as f:
result_dict = json.load(f)
x = collections.deque(numSamples * [0], numSamples)
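    # Fixed-length deque keeps only the last numSamples rewards; it is zero-filled, so runs with fewer than numSamples results get averaged against zeros.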
for result in result_dict["sum_reward"]:
# print(result["log_val"]["__ndarray__"])
x.append(result["value"]["__ndarray__"])
print("TESTSET: " + str(i+1) + " - AVERAGE OF LAST", numSamples, "sum rewards: ", sum(x) / numSamples)
sumRewardArray.append(sum(x) / numSamples)
print("-----AVERAGE OF 10 Test Sets: ", sum(sumRewardArray)/10, "-------")
| 889 | 30.785714 | 108 | py |
baconian-project | baconian-project-master/baconian/benchmark/ddpg_benchmark/mountaincar.py | """
DDPG benchmark on Mountain Car
"""
from baconian.core.core import EnvSpec
from baconian.envs.gym_env import make
from baconian.algo.value_func.mlp_q_value import MLPQValueFunction
from baconian.algo.ddpg import DDPG
from baconian.algo.policy import DeterministicMLPPolicy
from baconian.core.agent import Agent
from baconian.core.experiment import Experiment
from baconian.core.flow.train_test_flow import TrainTestFlow
from baconian.core.status import get_global_status_collect
from baconian.common.noise import *
from baconian.common.schedules import *
from baconian.benchmark.ddpg_benchmark.mountain_car_continuous_conf import MOUNTAIN_CAR_CONTINUOUS_BENCHMARK_CONFIG_DICT
def mountiancar_task_fn():
exp_config = MOUNTAIN_CAR_CONTINUOUS_BENCHMARK_CONFIG_DICT
GlobalConfig().set('DEFAULT_EXPERIMENT_END_POINT',
exp_config['DEFAULT_EXPERIMENT_END_POINT'])
env = make('MountainCarContinuous-v0')
name = 'benchmark'
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
mlp_q = MLPQValueFunction(env_spec=env_spec,
name_scope=name + '_mlp_q',
name=name + '_mlp_q',
**exp_config['MLPQValueFunction'])
policy = DeterministicMLPPolicy(env_spec=env_spec,
name_scope=name + '_mlp_policy',
name=name + '_mlp_policy',
output_low=env_spec.action_space.low,
output_high=env_spec.action_space.high,
**exp_config['DeterministicMLPPolicy'],
reuse=False)
ddpg = DDPG(
env_spec=env_spec,
policy=policy,
value_func=mlp_q,
name=name + '_ddpg',
**exp_config['DDPG']
)
n_actions = env.action_space.shape[0]
agent = Agent(env=env, env_spec=env_spec,
algo=ddpg,
exploration_strategy=None,
noise_adder=AgentActionNoiseWrapper(noise=OrnsteinUhlenbeckActionNoise(mu=np.zeros(n_actions),
sigma=0.5 * np.ones(
n_actions)),
noise_weight_scheduler=ConstantScheduler(value=1),
action_weight_scheduler=ConstantScheduler(value=1.0)),
reset_noise_every_terminal_state=True,
name=name + '_agent')
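    # The TrainTestFlow repeatedly dispatches to the callbacks below, driven by the global
    # train-sample counter: 'sample' collects transitions into the replay buffer, 'train'
    # updates DDPG, and 'test' runs evaluation episodes at the configured intervals.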
flow = TrainTestFlow(train_sample_count_func=lambda: get_global_status_collect()('TOTAL_AGENT_TRAIN_SAMPLE_COUNT'),
config_or_config_dict=exp_config['TrainTestFlow']['config_or_config_dict'],
func_dict={
'test': {'func': agent.test,
'args': list(),
'kwargs': dict(sample_count=exp_config['TrainTestFlow']['TEST_SAMPLES_COUNT']),
},
'train': {'func': agent.train,
'args': list(),
'kwargs': dict(),
},
'sample': {'func': agent.sample,
'args': list(),
'kwargs': dict(sample_count=exp_config['TrainTestFlow']['TRAIN_SAMPLES_COUNT'],
env=agent.env,
in_which_status='TRAIN',
store_flag=True),
},
})
experiment = Experiment(
tuner=None,
env=env,
agent=agent,
flow=flow,
name=name
)
experiment.run()
if __name__ == "__main__":
from baconian.core.experiment_runner import *
GlobalConfig().set('DEFAULT_LOG_PATH', './mountain_log_path')
single_exp_runner(mountiancar_task_fn, del_if_log_path_existed=True) | 4,363 | 45.425532 | 120 | py |
baconian-project | baconian-project-master/baconian/benchmark/ddpg_benchmark/pendulum_conf.py | PENDULUM_BENCHMARK_CONFIG_DICT = {
'env_id': "Pendulum-v0",
'MLPQValueFunction': {
'mlp_config': [
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 32,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "2",
"N_UNITS": 64,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "3",
"N_UNITS": 200,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "IDENTITY",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
]
},
'DeterministicMLPPolicy': {
'mlp_config': [
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 8,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "2",
"N_UNITS": 8,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "3",
"N_UNITS": 8,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "IDENTITY",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
]
},
'DDPG': {
'config_or_config_dict': {
"REPLAY_BUFFER_SIZE": 10000,
"GAMMA": 0.99,
"CRITIC_LEARNING_RATE": 0.001,
"ACTOR_LEARNING_RATE": 0.0001,
"DECAY": 0.5,
"BATCH_SIZE": 128,
"TRAIN_ITERATION": 120,
"critic_clip_norm": None,
"actor_clip_norm": None,
},
'replay_buffer': None
},
'TrainTestFlow': {
"TEST_SAMPLES_COUNT": 1,
"TRAIN_SAMPLES_COUNT": 20,
'config_or_config_dict': {
"TEST_EVERY_SAMPLE_COUNT": 10,
"TRAIN_EVERY_SAMPLE_COUNT": 10,
"START_TRAIN_AFTER_SAMPLE_COUNT": 0,
"START_TEST_AFTER_SAMPLE_COUNT": 0,
}
},
'EpsilonGreedy': {
'initial_p': 1.0,
'final_p': 0.0,
'schedule_timesteps': 10000
},
'DEFAULT_EXPERIMENT_END_POINT': dict(TOTAL_AGENT_TRAIN_SAMPLE_COUNT=10000)
} | 2,999 | 27.571429 | 78 | py |
baconian-project | baconian-project-master/baconian/benchmark/ddpg_benchmark/halfCheetah_conf.py | from baconian.core.core import EnvSpec
from baconian.envs.gym_env import make
import numpy as np
env = make("HalfCheetah-v2")
env_spec = EnvSpec(obs_space=env.observation_space, action_space=env.action_space)
OBS_DIM = env_spec.flat_obs_dim
HID1_SIZE = 400
HID2_SIZE = 300
POLICY_HID_MULTI = 10
ACT_DIM = env_spec.flat_action_dim
POLICY_HID1_SIZE = 400
POLICY_HID2_SIZE = 300
CHEETAH_BENCHMARK_CONFIG_DICT = {
'env_id': "HalfCheetah-v2",
'MLP_V': {
'mlp_config': [
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": HID1_SIZE,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": np.sqrt(1 / OBS_DIM)
},
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "2",
"N_UNITS": HID2_SIZE,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": np.sqrt(1 / HID1_SIZE)
},
{
"ACT": "IDENTITY",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": np.sqrt(1 / HID2_SIZE),
}
]
},
'POLICY': {
'mlp_config': [
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": POLICY_HID1_SIZE,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": np.sqrt(1 / OBS_DIM)
},
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "2",
"N_UNITS": POLICY_HID2_SIZE,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": np.sqrt(1 / POLICY_HID1_SIZE)
},
{
"ACT": "IDENTITY",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": ACT_DIM,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": np.sqrt(1 / POLICY_HID2_SIZE)
}
]
},
'DDPG': {
'config_or_config_dict': {
"REPLAY_BUFFER_SIZE": 1000000,
"GAMMA": 0.99,
"CRITIC_LEARNING_RATE": 0.001,
"ACTOR_LEARNING_RATE": 0.0001,
"DECAY": 0.999,
"BATCH_SIZE": 128,
"TRAIN_ITERATION": 120,
"critic_clip_norm": None,
"actor_clip_norm": None,
},
'replay_buffer': None
},
'TrainTestFlow': {
"TEST_SAMPLES_COUNT": 10,
"TRAIN_SAMPLES_COUNT": 1,
'config_or_config_dict': {
"TEST_EVERY_SAMPLE_COUNT": 1000,
"TRAIN_EVERY_SAMPLE_COUNT": 1,
"START_TRAIN_AFTER_SAMPLE_COUNT": 10000,
"START_TEST_AFTER_SAMPLE_COUNT": 20,
}
},
'DEFAULT_EXPERIMENT_END_POINT': dict(TOTAL_AGENT_TRAIN_SAMPLE_COUNT=1000000,
TOTAL_AGENT_TEST_SAMPLE_COUNT=None,
TOTAL_AGENT_UPDATE_COUNT=None),
'AGENT_NOISE': {
}
} | 3,137 | 28.603774 | 82 | py |
baconian-project | baconian-project-master/baconian/benchmark/ddpg_benchmark/__init__.py | from baconian.benchmark.ddpg_benchmark.pendulum import pendulum_task_fn
from baconian.benchmark.ddpg_benchmark.mountaincar import mountiancar_task_fn
| 150 | 49.333333 | 77 | py |
baconian-project | baconian-project-master/baconian/benchmark/ddpg_benchmark/hopper_conf.py | from baconian.core.core import EnvSpec
from baconian.envs.gym_env import make
import numpy as np
env = make("Hopper-v2")
env_spec = EnvSpec(obs_space=env.observation_space, action_space=env.action_space)
OBS_DIM = env_spec.flat_obs_dim
HID1_SIZE = 100
HID2_SIZE = 100
POLICY_HID_MULTI = 10
ACT_DIM = env_spec.flat_action_dim
POLICY_HID1_SIZE = HID1_SIZE
POLICY_HID2_SIZE = HID2_SIZE
HOPPER_BENCHMARK_CONFIG_DICT = {
'env_id': "Hopper-v2",
'MLP_V': {
'mlp_config': [
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": HID1_SIZE,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": np.sqrt(1 / OBS_DIM)
},
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "2",
"N_UNITS": HID2_SIZE,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": np.sqrt(1 / HID1_SIZE)
},
{
"ACT": "IDENTITY",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": np.sqrt(1 / HID2_SIZE),
}
]
},
'POLICY': {
'mlp_config': [
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": POLICY_HID1_SIZE,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": np.sqrt(1 / OBS_DIM)
},
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "2",
"N_UNITS": POLICY_HID2_SIZE,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": np.sqrt(1 / POLICY_HID1_SIZE)
},
{
"ACT": "IDENTITY",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": ACT_DIM,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": np.sqrt(1 / POLICY_HID2_SIZE)
}
]
},
'DDPG': {
'config_or_config_dict': {
"REPLAY_BUFFER_SIZE": 1000000,
"GAMMA": 0.99,
"CRITIC_LEARNING_RATE": 0.0001,
"ACTOR_LEARNING_RATE": 0.00001,
"DECAY": 0.999,
"BATCH_SIZE": 128,
"TRAIN_ITERATION": 120,
"critic_clip_norm": None,
"actor_clip_norm": None,
},
'replay_buffer': None
},
'TrainTestFlow': {
"TEST_SAMPLES_COUNT": 10,
"TRAIN_SAMPLES_COUNT": 1,
'config_or_config_dict': {
"TEST_EVERY_SAMPLE_COUNT": 1000,
"TRAIN_EVERY_SAMPLE_COUNT": 1,
"START_TRAIN_AFTER_SAMPLE_COUNT": 10000,
"START_TEST_AFTER_SAMPLE_COUNT": 20,
}
},
'DEFAULT_EXPERIMENT_END_POINT': dict(TOTAL_AGENT_TRAIN_SAMPLE_COUNT=100000,
TOTAL_AGENT_TEST_SAMPLE_COUNT=None,
TOTAL_AGENT_UPDATE_COUNT=None),
'AGENT_NOISE': {
}
} | 3,139 | 28.622642 | 82 | py |
baconian-project | baconian-project-master/baconian/benchmark/ddpg_benchmark/halfCheetah.py | """
DDPG benchmark on HalfCheetah
"""
import sys
from baconian.core.core import EnvSpec
from baconian.envs.gym_env import make
from baconian.algo.value_func.mlp_q_value import MLPQValueFunction
from baconian.algo.ddpg import DDPG
from baconian.algo.policy import DeterministicMLPPolicy
from baconian.core.agent import Agent
from baconian.core.experiment import Experiment
from baconian.core.flow.train_test_flow import TrainTestFlow
from baconian.core.status import get_global_status_collect
from baconian.common.noise import *
from baconian.common.schedules import *
from baconian.benchmark.ddpg_benchmark.halfCheetah_conf import CHEETAH_BENCHMARK_CONFIG_DICT
def halfcheetah_task_fn():
exp_config = CHEETAH_BENCHMARK_CONFIG_DICT
GlobalConfig().set('DEFAULT_EXPERIMENT_END_POINT',
exp_config['DEFAULT_EXPERIMENT_END_POINT'])
env = make('HalfCheetah-v2')
name = 'benchmark'
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
mlp_q = MLPQValueFunction(env_spec=env_spec,
name_scope=name + '_mlp_q',
name=name + '_mlp_q',
**exp_config['MLP_V'])
policy = DeterministicMLPPolicy(env_spec=env_spec,
name_scope=name + '_mlp_policy',
name=name + '_mlp_policy',
output_low=env_spec.action_space.low,
output_high=env_spec.action_space.high,
**exp_config['POLICY'],
reuse=False)
ddpg = DDPG(
env_spec=env_spec,
policy=policy,
value_func=mlp_q,
name=name + '_ddpg',
**exp_config['DDPG']
)
agent = Agent(env=env, env_spec=env_spec,
algo=ddpg,
exploration_strategy=None,
noise_adder=AgentActionNoiseWrapper(noise=OUNoise(theta=0.15, sigma= 0.3),
noise_weight_scheduler=ConstantScheduler(1),
action_weight_scheduler=ConstantScheduler(1), ),
name=name + '_agent')
flow = TrainTestFlow(train_sample_count_func=lambda: get_global_status_collect()('TOTAL_AGENT_TRAIN_SAMPLE_COUNT'),
config_or_config_dict=exp_config['TrainTestFlow']['config_or_config_dict'],
func_dict={
'test': {'func': agent.test,
'args': list(),
'kwargs': dict(sample_count=exp_config['TrainTestFlow']['TEST_SAMPLES_COUNT']),
},
'train': {'func': agent.train,
'args': list(),
'kwargs': dict(),
},
'sample': {'func': agent.sample,
'args': list(),
'kwargs': dict(sample_count=exp_config['TrainTestFlow']['TRAIN_SAMPLES_COUNT'],
env=agent.env,
in_which_status='TRAIN',
store_flag=True),
},
})
experiment = Experiment(
tuner=None,
env=env,
agent=agent,
flow=flow,
name=name
)
experiment.run()
if __name__ == "__main__":
    from baconian.core.experiment_runner import *
    GlobalConfig().set('DEFAULT_LOG_PATH', './half_cheetah_log_path' + sys.argv[1])
    single_exp_runner(halfcheetah_task_fn, del_if_log_path_existed=True)
| 3,923 | 41.193548 | 119 | py |
baconian-project | baconian-project-master/baconian/benchmark/ddpg_benchmark/hopper.py | """
DDPG benchmark on Hopper
"""
import sys
from baconian.core.core import EnvSpec
from baconian.envs.gym_env import make
from baconian.algo.value_func.mlp_q_value import MLPQValueFunction
from baconian.algo.ddpg import DDPG
from baconian.algo.policy import DeterministicMLPPolicy
from baconian.core.agent import Agent
from baconian.core.experiment import Experiment
from baconian.core.flow.train_test_flow import TrainTestFlow
from baconian.benchmark.ddpg_benchmark.hopper_conf import HOPPER_BENCHMARK_CONFIG_DICT
from baconian.core.status import get_global_status_collect
from baconian.common.noise import *
from baconian.common.schedules import *
def hopper_task_fn():
exp_config = HOPPER_BENCHMARK_CONFIG_DICT
GlobalConfig().set('DEFAULT_EXPERIMENT_END_POINT',
exp_config['DEFAULT_EXPERIMENT_END_POINT'])
env = make('Hopper-v2')
name = 'benchmark'
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
mlp_q = MLPQValueFunction(env_spec=env_spec,
name_scope=name + '_mlp_q',
name=name + '_mlp_q',
**exp_config['MLP_V'])
policy = DeterministicMLPPolicy(env_spec=env_spec,
name_scope=name + '_mlp_policy',
name=name + '_mlp_policy',
output_low=env_spec.action_space.low,
output_high=env_spec.action_space.high,
**exp_config['POLICY'],
reuse=False)
ddpg = DDPG(
env_spec=env_spec,
policy=policy,
value_func=mlp_q,
name=name + '_ddpg',
**exp_config['DDPG']
)
agent = Agent(env=env, env_spec=env_spec,
algo=ddpg,
exploration_strategy=None,
noise_adder=AgentActionNoiseWrapper(noise=OUNoise(theta=0.15, sigma= 0.3),
noise_weight_scheduler=ConstantScheduler(1),
action_weight_scheduler=ConstantScheduler(1), ),
name=name + '_agent')
flow = TrainTestFlow(train_sample_count_func=lambda: get_global_status_collect()('TOTAL_AGENT_TRAIN_SAMPLE_COUNT'),
config_or_config_dict=exp_config['TrainTestFlow']['config_or_config_dict'],
func_dict={
'test': {'func': agent.test,
'args': list(),
'kwargs': dict(sample_count=exp_config['TrainTestFlow']['TEST_SAMPLES_COUNT']),
},
'train': {'func': agent.train,
'args': list(),
'kwargs': dict(),
},
'sample': {'func': agent.sample,
'args': list(),
'kwargs': dict(sample_count=exp_config['TrainTestFlow']['TRAIN_SAMPLES_COUNT'],
env=agent.env,
in_which_status='TRAIN',
store_flag=True),
},
})
experiment = Experiment(
tuner=None,
env=env,
agent=agent,
flow=flow,
name=name
)
experiment.run()
if __name__ == "__main__":
    from baconian.core.experiment_runner import *
    GlobalConfig().set('DEFAULT_LOG_PATH', './hopper_log_path' + sys.argv[1])
    single_exp_runner(hopper_task_fn, del_if_log_path_existed=True)
| 3,890 | 40.83871 | 119 | py |
baconian-project | baconian-project-master/baconian/benchmark/ddpg_benchmark/mountain_car_continuous_conf.py | MOUNTAIN_CAR_CONTINUOUS_BENCHMARK_CONFIG_DICT = {
'env_id': 'MountainCarContinuous-v0',
'MLPQValueFunction': {
'mlp_config': [
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 32,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "2",
"N_UNITS": 128,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "IDENTITY",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
]
},
'DeterministicMLPPolicy': {
'mlp_config': [
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 32,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "2",
"N_UNITS": 64,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "IDENTITY",
"B_INIT_VALUE": 0.0,
"NAME": "3",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
]
},
'DDPG': {
'config_or_config_dict': {
"REPLAY_BUFFER_SIZE": 25000,
"GAMMA": 0.99,
"CRITIC_LEARNING_RATE": 0.0001,
"ACTOR_LEARNING_RATE": 0.00001,
"DECAY": 0.99,
"BATCH_SIZE": 8,
"TRAIN_ITERATION": 50,
"critic_clip_norm": None,
"actor_clip_norm": None,
},
'replay_buffer': None
},
'TrainTestFlow': {
"TEST_SAMPLES_COUNT": 1,
"TRAIN_SAMPLES_COUNT": 100,
'config_or_config_dict': {
"TEST_EVERY_SAMPLE_COUNT": 100,
"TRAIN_EVERY_SAMPLE_COUNT": 100,
"START_TRAIN_AFTER_SAMPLE_COUNT": 0,
"START_TEST_AFTER_SAMPLE_COUNT": 0,
}
},
'DEFAULT_EXPERIMENT_END_POINT': dict(TOTAL_AGENT_TRAIN_SAMPLE_COUNT=300000,
TOTAL_AGENT_TEST_SAMPLE_COUNT=None,
TOTAL_AGENT_UPDATE_COUNT=None),
}
| 2,606 | 28.292135 | 79 | py |
baconian-project | baconian-project-master/baconian/benchmark/dyna_benchmark/inverted_pendulum.py | """
Dyna benchmark on Pendulum
"""
from baconian.benchmark.dyna_benchmark.inverted_pendulum_conf import \
INVERTED_PENDULUM_BENCHMARK_CONFIG_DICT as exp_config
from baconian.common.noise import *
from baconian.common.schedules import *
from baconian.core.core import EnvSpec
from baconian.envs.gym_env import make
from baconian.algo.value_func.mlp_q_value import MLPQValueFunction
from baconian.algo.ddpg import DDPG
from baconian.algo.policy import DeterministicMLPPolicy
from baconian.core.agent import Agent
from baconian.core.experiment import Experiment
from baconian.config.global_config import GlobalConfig
from baconian.core.status import get_global_status_collect
from baconian.algo.dynamics.mlp_dynamics_model import ContinuousMLPGlobalDynamicsModel
from baconian.algo.dyna import Dyna
from baconian.algo.dynamics.terminal_func.terminal_func import FixedEpisodeLengthTerminalFunc
from baconian.core.flow.dyna_flow import DynaFlow
from baconian.envs.envs_reward_func import REWARD_FUNC_DICT
def inverted_pendulum_task_fn():
GlobalConfig().set('DEFAULT_EXPERIMENT_END_POINT',
exp_config['DEFAULT_EXPERIMENT_END_POINT'])
env = make(exp_config['env_id'])
name = 'benchmark'
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
mlp_q = MLPQValueFunction(env_spec=env_spec,
name_scope=name + '_mlp_q',
name=name + '_mlp_q',
**exp_config['MLPQValueFunction'])
policy = DeterministicMLPPolicy(env_spec=env_spec,
name_scope=name + '_mlp_policy',
name=name + '_mlp_policy',
output_low=env_spec.action_space.low,
output_high=env_spec.action_space.high,
**exp_config['DeterministicMLPPolicy'],
reuse=False)
ddpg = DDPG(
env_spec=env_spec,
policy=policy,
value_func=mlp_q,
name=name + '_ddpg',
**exp_config['DDPG']
)
mlp_dyna = ContinuousMLPGlobalDynamicsModel(
env_spec=env_spec,
name_scope=name + '_mlp_dyna',
name=name + '_mlp_dyna',
**exp_config['DynamicsModel']
)
algo = Dyna(env_spec=env_spec,
name=name + '_dyna_algo',
model_free_algo=ddpg,
dynamics_model=mlp_dyna,
config_or_config_dict=dict(
dynamics_model_train_iter=10,
model_free_algo_train_iter=10
))
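    # The learned dynamics model only predicts next observations, so the synthetic "dynamics env"
    # used for imagined rollouts needs explicit terminal and reward functions; here a fixed
    # episode length and the analytic reward from REWARD_FUNC_DICT are supplied.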
algo.set_terminal_reward_function_for_dynamics_env(
terminal_func=FixedEpisodeLengthTerminalFunc(max_step_length=env.unwrapped._max_episode_steps,
step_count_fn=algo.dynamics_env.total_step_count_fn),
reward_func=REWARD_FUNC_DICT['Pendulum-v0']())
agent = Agent(env=env, env_spec=env_spec,
algo=algo,
exploration_strategy=None,
noise_adder=AgentActionNoiseWrapper(noise=OrnsteinUhlenbeckActionNoise(np.zeros(1, ), 0.15),
noise_weight_scheduler=ConstantScheduler(value=0.3),
action_weight_scheduler=ConstantScheduler(value=1.0)),
name=name + '_agent')
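    # DynaFlow interleaves the stages below: sample real transitions, fit the dynamics model,
    # train the model-free DDPG on both real and synthesized data, and periodically test the
    # policy and the dynamics model, scheduled by the counts in exp_config['DynaFlow'].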
flow = DynaFlow(
train_sample_count_func=lambda: get_global_status_collect()('TOTAL_AGENT_TRAIN_SAMPLE_COUNT'),
config_or_config_dict=exp_config['DynaFlow'],
func_dict={
'train_algo': {'func': agent.train,
'args': list(),
'kwargs': dict(state='state_agent_training')},
'train_algo_from_synthesized_data': {'func': agent.train,
'args': list(),
                                                 # TODO use a decomposed way to represent the state
# e.g., TRAIN:AGENT:CYBER
'kwargs': dict(state='state_agent_training', train_iter=1)},
'train_dynamics': {'func': agent.train,
'args': list(),
'kwargs': dict(state='state_dynamics_training')},
'test_algo': {'func': agent.test,
'args': list(),
'kwargs': dict(sample_count=1)},
'test_dynamics': {'func': agent.algo.test_dynamics,
'args': list(),
'kwargs': dict(sample_count=10, env=env)},
'sample_from_real_env': {'func': agent.sample,
'args': list(),
'kwargs': dict(sample_count=20,
env=agent.env,
sample_type='transition',
in_which_status='TRAIN',
store_flag=True)},
'sample_from_dynamics_env': {'func': agent.sample,
'args': list(),
'kwargs': dict(sample_count=20,
sample_type='transition',
env=agent.algo.dynamics_env,
in_which_status='TRAIN',
store_flag=False)}
}
)
experiment = Experiment(
tuner=None,
env=env,
agent=agent,
flow=flow,
name=name
)
experiment.run()
| 5,951 | 46.238095 | 110 | py |
baconian-project | baconian-project-master/baconian/benchmark/dyna_benchmark/mountaincar_conf.py | MOUNTAIN_CAR_BENCHMARK_CONFIG_DICT = {
'env_id': "MountainCarContinuous-v0",
'MLPQValueFunction': {
'mlp_config': [
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 32,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "2",
"N_UNITS": 64,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "3",
"N_UNITS": 200,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "IDENTITY",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
]
},
'DeterministicMLPPolicy': {
'mlp_config': [
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 8,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "2",
"N_UNITS": 8,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "3",
"N_UNITS": 8,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "IDENTITY",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
]
},
'DDPG': {
'config_or_config_dict': {
"REPLAY_BUFFER_SIZE": 10000,
"GAMMA": 0.99,
"CRITIC_LEARNING_RATE": 0.001,
"ACTOR_LEARNING_RATE": 0.0001,
"DECAY": 0.5,
"BATCH_SIZE": 128,
"TRAIN_ITERATION": 120,
"critic_clip_norm": None,
"actor_clip_norm": None,
},
'replay_buffer': None
},
'EpsilonGreedy': {
'initial_p': 1.0,
'final_p': 0.0,
'schedule_timesteps': 10000
},
'DEFAULT_EXPERIMENT_END_POINT': dict(TOTAL_ENV_STEP_TRAIN_SAMPLE_COUNT=10000),
'DynaFlow': {
"TEST_ALGO_EVERY_REAL_SAMPLE_COUNT": 100,
"TEST_DYNAMICS_EVERY_REAL_SAMPLE_COUNT": 100,
"TRAIN_DYNAMICS_EVERY_REAL_SAMPLE_COUNT": 20,
"START_TRAIN_ALGO_AFTER_SAMPLE_COUNT": 1,
"START_TRAIN_DYNAMICS_AFTER_SAMPLE_COUNT": 1,
"START_TEST_ALGO_AFTER_SAMPLE_COUNT": 1,
"START_TEST_DYNAMICS_AFTER_SAMPLE_COUNT": 1,
"WARM_UP_DYNAMICS_SAMPLES": 10000,
"TRAIN_ALGO_EVERY_REAL_SAMPLE_COUNT_FROM_REAL_ENV": 20,
"TRAIN_ALGO_EVERY_REAL_SAMPLE_COUNT_FROM_DYNAMICS_ENV": 20,
},
'DynamicsModel': dict(learning_rate=0.01,
mlp_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 32,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "2",
"N_UNITS": 64,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "IDENTITY",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 3,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
])
}
| 4,502 | 32.110294 | 82 | py |
baconian-project | baconian-project-master/baconian/benchmark/dyna_benchmark/inverted_pendulum_conf.py | INVERTED_PENDULUM_BENCHMARK_CONFIG_DICT = {
'env_id': "InvertedPendulum-v2",
'MLPQValueFunction': {
'mlp_config': [
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 32,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "2",
"N_UNITS": 64,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "3",
"N_UNITS": 200,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "IDENTITY",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
]
},
'DeterministicMLPPolicy': {
'mlp_config': [
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 8,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "2",
"N_UNITS": 8,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "3",
"N_UNITS": 8,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "IDENTITY",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
]
},
'DDPG': {
'config_or_config_dict': {
"REPLAY_BUFFER_SIZE": 10000,
"GAMMA": 0.99,
"CRITIC_LEARNING_RATE": 0.001,
"ACTOR_LEARNING_RATE": 0.0001,
"DECAY": 0.5,
"BATCH_SIZE": 128,
"TRAIN_ITERATION": 120,
"critic_clip_norm": None,
"actor_clip_norm": None,
},
'replay_buffer': None
},
'EpsilonGreedy': {
'initial_p': 1.0,
'final_p': 0.0,
'schedule_timesteps': 10000
},
'DEFAULT_EXPERIMENT_END_POINT': dict(TOTAL_ENV_STEP_TRAIN_SAMPLE_COUNT=10000),
'DynaFlow': {
"TEST_ALGO_EVERY_REAL_SAMPLE_COUNT": 100,
"TEST_DYNAMICS_EVERY_REAL_SAMPLE_COUNT": 100,
"TRAIN_DYNAMICS_EVERY_REAL_SAMPLE_COUNT": 20,
"START_TRAIN_ALGO_AFTER_SAMPLE_COUNT": 1,
"START_TRAIN_DYNAMICS_AFTER_SAMPLE_COUNT": 1,
"START_TEST_ALGO_AFTER_SAMPLE_COUNT": 1,
"START_TEST_DYNAMICS_AFTER_SAMPLE_COUNT": 1,
"WARM_UP_DYNAMICS_SAMPLES": 10000,
"TRAIN_ALGO_EVERY_REAL_SAMPLE_COUNT_FROM_REAL_ENV": 20,
"TRAIN_ALGO_EVERY_REAL_SAMPLE_COUNT_FROM_DYNAMICS_ENV": 20,
},
'DynamicsModel': dict(learning_rate=0.01,
mlp_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 32,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "2",
"N_UNITS": 64,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "IDENTITY",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 3,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
])
}
| 4,502 | 32.110294 | 82 | py |
baconian-project | baconian-project-master/baconian/benchmark/dyna_benchmark/pendulum.py | """
Dyna benchmark on Pendulum
"""
from baconian.benchmark.dyna_benchmark.pendulum_conf import PENDULUM_BENCHMARK_CONFIG_DICT as exp_config
from baconian.common.noise import *
from baconian.common.schedules import *
from baconian.core.core import EnvSpec
from baconian.envs.gym_env import make
from baconian.algo.value_func.mlp_q_value import MLPQValueFunction
from baconian.algo.ddpg import DDPG
from baconian.algo.policy import DeterministicMLPPolicy
from baconian.core.agent import Agent
from baconian.core.experiment import Experiment
from baconian.config.global_config import GlobalConfig
from baconian.core.status import get_global_status_collect
from baconian.algo.dynamics.mlp_dynamics_model import ContinuousMLPGlobalDynamicsModel
from baconian.algo.dyna import Dyna
from baconian.algo.dynamics.terminal_func.terminal_func import FixedEpisodeLengthTerminalFunc
from baconian.core.flow.dyna_flow import DynaFlow
from baconian.envs.envs_reward_func import REWARD_FUNC_DICT
def pendulum_task_fn():
GlobalConfig().set('DEFAULT_EXPERIMENT_END_POINT',
exp_config['DEFAULT_EXPERIMENT_END_POINT'])
env = make('Pendulum-v0')
name = 'benchmark'
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
mlp_q = MLPQValueFunction(env_spec=env_spec,
name_scope=name + '_mlp_q',
name=name + '_mlp_q',
**exp_config['MLPQValueFunction'])
policy = DeterministicMLPPolicy(env_spec=env_spec,
name_scope=name + '_mlp_policy',
name=name + '_mlp_policy',
output_low=env_spec.action_space.low,
output_high=env_spec.action_space.high,
**exp_config['DeterministicMLPPolicy'],
reuse=False)
ddpg = DDPG(
env_spec=env_spec,
policy=policy,
value_func=mlp_q,
name=name + '_ddpg',
**exp_config['DDPG']
)
mlp_dyna = ContinuousMLPGlobalDynamicsModel(
env_spec=env_spec,
name_scope=name + '_mlp_dyna',
name=name + '_mlp_dyna',
**exp_config['DynamicsModel']
)
algo = Dyna(env_spec=env_spec,
name=name + '_dyna_algo',
model_free_algo=ddpg,
dynamics_model=mlp_dyna,
config_or_config_dict=dict(
dynamics_model_train_iter=10,
model_free_algo_train_iter=10
))
algo.set_terminal_reward_function_for_dynamics_env(
terminal_func=FixedEpisodeLengthTerminalFunc(max_step_length=env.unwrapped._max_episode_steps,
step_count_fn=algo.dynamics_env.total_step_count_fn),
reward_func=REWARD_FUNC_DICT['Pendulum-v0']())
agent = Agent(env=env, env_spec=env_spec,
algo=algo,
exploration_strategy=None,
noise_adder=AgentActionNoiseWrapper(noise=OrnsteinUhlenbeckActionNoise(np.zeros(1, ), 0.15),
noise_weight_scheduler=ConstantScheduler(value=0.3),
action_weight_scheduler=ConstantScheduler(value=1.0)),
name=name + '_agent')
flow = DynaFlow(
train_sample_count_func=lambda: get_global_status_collect()('TOTAL_AGENT_TRAIN_SAMPLE_COUNT'),
config_or_config_dict=exp_config['DynaFlow'],
func_dict={
'train_algo': {'func': agent.train,
'args': list(),
'kwargs': dict(state='state_agent_training')},
'train_algo_from_synthesized_data': {'func': agent.train,
'args': list(),
                                                 # TODO use a decomposed way to represent the state
# e.g., TRAIN:AGENT:CYBER
'kwargs': dict(state='state_agent_training', train_iter=1)},
'train_dynamics': {'func': agent.train,
'args': list(),
'kwargs': dict(state='state_dynamics_training')},
'test_algo': {'func': agent.test,
'args': list(),
'kwargs': dict(sample_count=1)},
'test_dynamics': {'func': agent.algo.test_dynamics,
'args': list(),
'kwargs': dict(sample_count=10, env=env)},
'sample_from_real_env': {'func': agent.sample,
'args': list(),
'kwargs': dict(sample_count=20,
env=agent.env,
sample_type='transition',
in_which_status='TRAIN',
store_flag=True)},
'sample_from_dynamics_env': {'func': agent.sample,
'args': list(),
'kwargs': dict(sample_count=20,
sample_type='transition',
env=agent.algo.dynamics_env,
in_which_status='TRAIN',
store_flag=False)}
}
)
experiment = Experiment(
tuner=None,
env=env,
agent=agent,
flow=flow,
name=name
)
experiment.run()
| 5,911 | 46.296 | 110 | py |
baconian-project | baconian-project-master/baconian/benchmark/dyna_benchmark/mountaincar.py | """
Dyna benchmark on Pendulum
"""
from baconian.benchmark.dyna_benchmark.mountaincar_conf import MOUNTAIN_CAR_BENCHMARK_CONFIG_DICT as exp_config
from baconian.common.noise import *
from baconian.common.schedules import *
from baconian.core.core import EnvSpec
from baconian.envs.gym_env import make
from baconian.algo.value_func.mlp_q_value import MLPQValueFunction
from baconian.algo.ddpg import DDPG
from baconian.algo.policy import DeterministicMLPPolicy
from baconian.core.agent import Agent
from baconian.core.experiment import Experiment
from baconian.config.global_config import GlobalConfig
from baconian.core.status import get_global_status_collect
from baconian.algo.dynamics.mlp_dynamics_model import ContinuousMLPGlobalDynamicsModel
from baconian.algo.dyna import Dyna
from baconian.algo.dynamics.terminal_func.terminal_func import FixedEpisodeLengthTerminalFunc
from baconian.core.flow.dyna_flow import DynaFlow
from baconian.envs.envs_reward_func import REWARD_FUNC_DICT
def mountaincar_task_fn():
GlobalConfig().set('DEFAULT_EXPERIMENT_END_POINT',
exp_config['DEFAULT_EXPERIMENT_END_POINT'])
env = make(exp_config['env_id'])
name = 'benchmark'
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
mlp_q = MLPQValueFunction(env_spec=env_spec,
name_scope=name + '_mlp_q',
name=name + '_mlp_q',
**exp_config['MLPQValueFunction'])
policy = DeterministicMLPPolicy(env_spec=env_spec,
name_scope=name + '_mlp_policy',
name=name + '_mlp_policy',
output_low=env_spec.action_space.low,
output_high=env_spec.action_space.high,
**exp_config['DeterministicMLPPolicy'],
reuse=False)
ddpg = DDPG(
env_spec=env_spec,
policy=policy,
value_func=mlp_q,
name=name + '_ddpg',
**exp_config['DDPG']
)
mlp_dyna = ContinuousMLPGlobalDynamicsModel(
env_spec=env_spec,
name_scope=name + '_mlp_dyna',
name=name + '_mlp_dyna',
**exp_config['DynamicsModel']
)
algo = Dyna(env_spec=env_spec,
name=name + '_dyna_algo',
model_free_algo=ddpg,
dynamics_model=mlp_dyna,
config_or_config_dict=dict(
dynamics_model_train_iter=10,
model_free_algo_train_iter=10
))
algo.set_terminal_reward_function_for_dynamics_env(
terminal_func=FixedEpisodeLengthTerminalFunc(max_step_length=env.unwrapped._max_episode_steps,
step_count_fn=algo.dynamics_env.total_step_count_fn),
reward_func=REWARD_FUNC_DICT['Pendulum-v0']())
agent = Agent(env=env, env_spec=env_spec,
algo=algo,
exploration_strategy=None,
noise_adder=AgentActionNoiseWrapper(noise=OrnsteinUhlenbeckActionNoise(np.zeros(1, ), 0.15),
noise_weight_scheduler=ConstantScheduler(value=0.3),
action_weight_scheduler=ConstantScheduler(value=1.0)),
name=name + '_agent')
flow = DynaFlow(
train_sample_count_func=lambda: get_global_status_collect()('TOTAL_AGENT_TRAIN_SAMPLE_COUNT'),
config_or_config_dict=exp_config['DynaFlow'],
func_dict={
'train_algo': {'func': agent.train,
'args': list(),
'kwargs': dict(state='state_agent_training')},
'train_algo_from_synthesized_data': {'func': agent.train,
'args': list(),
                                                 # TODO use a decomposed way to represent the state
# e.g., TRAIN:AGENT:CYBER
'kwargs': dict(state='state_agent_training', train_iter=1)},
'train_dynamics': {'func': agent.train,
'args': list(),
'kwargs': dict(state='state_dynamics_training')},
'test_algo': {'func': agent.test,
'args': list(),
'kwargs': dict(sample_count=1)},
'test_dynamics': {'func': agent.algo.test_dynamics,
'args': list(),
'kwargs': dict(sample_count=10, env=env)},
'sample_from_real_env': {'func': agent.sample,
'args': list(),
'kwargs': dict(sample_count=20,
env=agent.env,
sample_type='transition',
in_which_status='TRAIN',
store_flag=True)},
'sample_from_dynamics_env': {'func': agent.sample,
'args': list(),
'kwargs': dict(sample_count=20,
sample_type='transition',
env=agent.algo.dynamics_env,
in_which_status='TRAIN',
store_flag=False)}
}
)
experiment = Experiment(
tuner=None,
env=env,
agent=agent,
flow=flow,
name=name
)
experiment.run()
| 5,925 | 46.408 | 111 | py |
baconian-project | baconian-project-master/baconian/benchmark/dyna_benchmark/pendulum_conf.py | PENDULUM_BENCHMARK_CONFIG_DICT = {
'env_id': "Pendulum-v0",
'MLPQValueFunction': {
'mlp_config': [
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 32,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "2",
"N_UNITS": 64,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "3",
"N_UNITS": 200,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "IDENTITY",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
]
},
'DeterministicMLPPolicy': {
'mlp_config': [
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 8,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "2",
"N_UNITS": 8,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "3",
"N_UNITS": 8,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "IDENTITY",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
]
},
'DDPG': {
'config_or_config_dict': {
"REPLAY_BUFFER_SIZE": 10000,
"GAMMA": 0.99,
"CRITIC_LEARNING_RATE": 0.001,
"ACTOR_LEARNING_RATE": 0.0001,
"DECAY": 0.5,
"BATCH_SIZE": 128,
"TRAIN_ITERATION": 120,
"critic_clip_norm": None,
"actor_clip_norm": None,
},
'replay_buffer': None
},
'EpsilonGreedy': {
'initial_p': 1.0,
'final_p': 0.0,
'schedule_timesteps': 10000
},
'DEFAULT_EXPERIMENT_END_POINT': dict(TOTAL_ENV_STEP_TRAIN_SAMPLE_COUNT=10000),
'DynaFlow': {
"TEST_ALGO_EVERY_REAL_SAMPLE_COUNT": 100,
"TEST_DYNAMICS_EVERY_REAL_SAMPLE_COUNT": 100,
"TRAIN_DYNAMICS_EVERY_REAL_SAMPLE_COUNT": 20,
"START_TRAIN_ALGO_AFTER_SAMPLE_COUNT": 1,
"START_TRAIN_DYNAMICS_AFTER_SAMPLE_COUNT": 1,
"START_TEST_ALGO_AFTER_SAMPLE_COUNT": 1,
"START_TEST_DYNAMICS_AFTER_SAMPLE_COUNT": 1,
"WARM_UP_DYNAMICS_SAMPLES": 10000,
"TRAIN_ALGO_EVERY_REAL_SAMPLE_COUNT_FROM_REAL_ENV": 20,
"TRAIN_ALGO_EVERY_REAL_SAMPLE_COUNT_FROM_DYNAMICS_ENV": 20,
},
'DynamicsModel': dict(learning_rate=0.01,
mlp_config=[
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 32,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "2",
"N_UNITS": 64,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "IDENTITY",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 3,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
])
}
| 4,484 | 32.222222 | 82 | py |
baconian-project | baconian-project-master/baconian/benchmark/dyna_benchmark/__init__.py | from baconian.benchmark.dyna_benchmark.pendulum import pendulum_task_fn as dyna_pendulum_task_fn
| 97 | 48 | 96 | py |
baconian-project | baconian-project-master/baconian/benchmark/dqn_benchmark/acrobot_conf.py |
ACROBOT_BENCHMARK_CONFIG_DICT = {
'env_id': "Acrobot-v1",
'MLPQValueFunction': {
'mlp_config': [
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 64,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "2",
"N_UNITS": 64,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "3",
"N_UNITS": 256,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
]
},
'DQN': {
'config_or_config_dict': {
"REPLAY_BUFFER_SIZE": 50000,
"GAMMA": 0.99,
"DECAY": 0,
"BATCH_SIZE": 32,
"TRAIN_ITERATION": 1,
"LEARNING_RATE": 0.001,
},
'replay_buffer': None
},
'EpsilonGreedy': {
'LinearScheduler': {
'initial_p': 1.0,
'final_p': 0.02,
'schedule_timesteps': int(0.1 * 100000)
},
'config_or_config_dict': {
"init_random_prob": 1.0
}
},
'TrainTestFlow': {
"TEST_SAMPLES_COUNT": 3,
"TRAIN_SAMPLES_COUNT": 1,
'config_or_config_dict': {
"TEST_EVERY_SAMPLE_COUNT": 100,
"TRAIN_EVERY_SAMPLE_COUNT": 1,
"START_TRAIN_AFTER_SAMPLE_COUNT": 1000,
"START_TEST_AFTER_SAMPLE_COUNT": 0,
}
},
'DEFAULT_EXPERIMENT_END_POINT': dict(TOTAL_AGENT_TRAIN_SAMPLE_COUNT=100000,
TOTAL_AGENT_TEST_SAMPLE_COUNT=None,
TOTAL_AGENT_UPDATE_COUNT=None),
}
| 2,169 | 27.552632 | 79 | py |
baconian-project | baconian-project-master/baconian/benchmark/dqn_benchmark/cartpole.py | """
DQN benchmark on cart pole
"""
from baconian.benchmark.dqn_benchmark.cartpole_conf import *
from baconian.algo.dqn import DQN
from baconian.core.core import EnvSpec
from baconian.envs.gym_env import make
from baconian.algo.value_func.mlp_q_value import MLPQValueFunction
from baconian.core.agent import Agent
from baconian.algo.misc import EpsilonGreedy
from baconian.core.experiment import Experiment
from baconian.core.flow.train_test_flow import TrainTestFlow
from baconian.config.global_config import GlobalConfig
from baconian.common.schedules import LinearScheduler
from baconian.core.status import get_global_status_collect
def cartpole_task_fn():
exp_config = CARTPOLE_BENCHMARK_CONFIG_DICT
GlobalConfig().set('DEFAULT_EXPERIMENT_END_POINT',
exp_config['DEFAULT_EXPERIMENT_END_POINT'])
env = make('CartPole-v1')
name = 'benchmark'
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
mlp_q = MLPQValueFunction(env_spec=env_spec,
name_scope=name + '_mlp_q',
name=name + '_mlp_q',
**exp_config['MLPQValueFunction'])
dqn = DQN(env_spec=env_spec,
name=name + '_dqn',
value_func=mlp_q,
**exp_config['DQN'])
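    # Exploration: epsilon-greedy whose random-action probability decays linearly from initial_p
    # to final_p over schedule_timesteps, driven by the global count of training samples so far.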
agent = Agent(env=env, env_spec=env_spec,
algo=dqn,
name=name + '_agent',
exploration_strategy=EpsilonGreedy(action_space=env_spec.action_space,
prob_scheduler=LinearScheduler(
t_fn=lambda: get_global_status_collect()(
'TOTAL_AGENT_TRAIN_SAMPLE_COUNT'),
**exp_config['EpsilonGreedy']['LinearScheduler']),
**exp_config['EpsilonGreedy']['config_or_config_dict']))
flow = TrainTestFlow(train_sample_count_func=lambda: get_global_status_collect()('TOTAL_AGENT_TRAIN_SAMPLE_COUNT'),
config_or_config_dict=exp_config['TrainTestFlow']['config_or_config_dict'],
func_dict={
'test': {'func': agent.test,
'args': list(),
'kwargs': dict(sample_count=exp_config['TrainTestFlow']['TEST_SAMPLES_COUNT']),
},
'train': {'func': agent.train,
'args': list(),
'kwargs': dict(),
},
'sample': {'func': agent.sample,
'args': list(),
'kwargs': dict(sample_count=exp_config['TrainTestFlow']['TRAIN_SAMPLES_COUNT'],
env=agent.env,
in_which_status='TRAIN',
store_flag=True),
},
})
experiment = Experiment(
tuner=None,
env=env,
agent=agent,
flow=flow,
name=name
)
experiment.run()
| 3,481 | 47.361111 | 119 | py |
baconian-project | baconian-project-master/baconian/benchmark/dqn_benchmark/mountaincar_conf.py |
MOUNTAINCAR_BENCHMARK_CONFIG_DICT = {
'env_id': "MountainCar-v0",
'MLPQValueFunction': {
'mlp_config': [
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 64,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "2",
"N_UNITS": 64,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "3",
"N_UNITS": 256,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
]
},
'DQN': {
'config_or_config_dict': {
"REPLAY_BUFFER_SIZE": 50000,
"GAMMA": 0.99,
"DECAY": 0,
"BATCH_SIZE": 32,
"TRAIN_ITERATION": 1,
"LEARNING_RATE": 0.001,
},
'replay_buffer': None
},
'EpsilonGreedy': {
'LinearScheduler': {
'initial_p': 1.0,
'final_p': 0.1,
'schedule_timesteps': int(0.1 * 100000)
},
'config_or_config_dict': {
"init_random_prob": 1.0
}
},
'TrainTestFlow': {
"TEST_SAMPLES_COUNT": 3,
"TRAIN_SAMPLES_COUNT": 1,
'config_or_config_dict': {
"TEST_EVERY_SAMPLE_COUNT": 100,
"TRAIN_EVERY_SAMPLE_COUNT": 1,
"START_TRAIN_AFTER_SAMPLE_COUNT": 1000,
"START_TEST_AFTER_SAMPLE_COUNT": 0,
}
},
'DEFAULT_EXPERIMENT_END_POINT': dict(TOTAL_AGENT_TRAIN_SAMPLE_COUNT=100000,
TOTAL_AGENT_TEST_SAMPLE_COUNT=None,
TOTAL_AGENT_UPDATE_COUNT=None),
}
| 2,176 | 27.644737 | 79 | py |
baconian-project | baconian-project-master/baconian/benchmark/dqn_benchmark/lunarlander.py | """
DQN benchmark on lunarlander
"""
from baconian.benchmark.dqn_benchmark.lunarlander_conf import *
from baconian.algo.dqn import DQN
from baconian.core.core import EnvSpec
from baconian.envs.gym_env import make
from baconian.algo.value_func.mlp_q_value import MLPQValueFunction
from baconian.core.agent import Agent
from baconian.algo.misc import EpsilonGreedy
from baconian.core.experiment import Experiment
from baconian.core.flow.train_test_flow import TrainTestFlow
from baconian.config.global_config import GlobalConfig
from baconian.common.schedules import LinearScheduler
from baconian.core.status import get_global_status_collect
def lunarlander_task_fn():
exp_config = LUNARLANDER_BENCHMARK_CONFIG_DICT
GlobalConfig().set('DEFAULT_EXPERIMENT_END_POINT',
exp_config['DEFAULT_EXPERIMENT_END_POINT'])
env = make('LunarLander-v2')
name = 'benchmark'
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
mlp_q = MLPQValueFunction(env_spec=env_spec,
name_scope=name + '_mlp_q',
name=name + '_mlp_q',
**exp_config['MLPQValueFunction'])
dqn = DQN(env_spec=env_spec,
name=name + '_dqn',
value_func=mlp_q,
**exp_config['DQN'])
agent = Agent(env=env, env_spec=env_spec,
algo=dqn,
name=name + '_agent',
exploration_strategy=EpsilonGreedy(action_space=env_spec.action_space,
prob_scheduler=LinearScheduler(
t_fn=lambda: get_global_status_collect()(
'TOTAL_AGENT_TRAIN_SAMPLE_COUNT'),
**exp_config['EpsilonGreedy']['LinearScheduler']),
**exp_config['EpsilonGreedy']['config_or_config_dict']))
flow = TrainTestFlow(train_sample_count_func=lambda: get_global_status_collect()('TOTAL_AGENT_TRAIN_SAMPLE_COUNT'),
config_or_config_dict=exp_config['TrainTestFlow']['config_or_config_dict'],
func_dict={
'test': {'func': agent.test,
'args': list(),
'kwargs': dict(sample_count=exp_config['TrainTestFlow']['TEST_SAMPLES_COUNT']),
},
'train': {'func': agent.train,
'args': list(),
'kwargs': dict(),
},
'sample': {'func': agent.sample,
'args': list(),
'kwargs': dict(sample_count=exp_config['TrainTestFlow']['TRAIN_SAMPLES_COUNT'],
env=agent.env,
in_which_status='TRAIN',
store_flag=True),
},
})
experiment = Experiment(
tuner=None,
env=env,
agent=agent,
flow=flow,
name=name
)
experiment.run()
| 3,496 | 46.90411 | 119 | py |
baconian-project | baconian-project-master/baconian/benchmark/dqn_benchmark/mountaincar.py | """
DQN benchmark on mountaincar
"""
from baconian.benchmark.dqn_benchmark.mountaincar_conf import MOUNTAINCAR_BENCHMARK_CONFIG_DICT
from baconian.core.core import EnvSpec
from baconian.envs.gym_env import make
from baconian.algo.value_func.mlp_q_value import MLPQValueFunction
from baconian.algo.dqn import DQN
from baconian.algo.misc import EpsilonGreedy
from baconian.core.agent import Agent
from baconian.core.experiment import Experiment
from baconian.core.flow.train_test_flow import TrainTestFlow
from baconian.config.global_config import GlobalConfig
from baconian.core.status import get_global_status_collect
from baconian.common.schedules import *
def mountaincar_task_fn():
exp_config = MOUNTAINCAR_BENCHMARK_CONFIG_DICT
GlobalConfig().set('DEFAULT_EXPERIMENT_END_POINT',
exp_config['DEFAULT_EXPERIMENT_END_POINT'])
env = make('MountainCar-v0')
name = 'benchmark'
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
mlp_q = MLPQValueFunction(env_spec=env_spec,
name_scope=name + '_mlp_q',
name=name + '_mlp_q',
**exp_config['MLPQValueFunction'])
dqn = DQN(env_spec=env_spec,
name=name + '_dqn',
value_func=mlp_q,
**exp_config['DQN'])
agent = Agent(env=env, env_spec=env_spec,
algo=dqn,
name=name + '_agent',
exploration_strategy=EpsilonGreedy(action_space=env_spec.action_space,
prob_scheduler=LinearScheduler(
t_fn=lambda: get_global_status_collect()(
'TOTAL_AGENT_TRAIN_SAMPLE_COUNT'),
**exp_config['EpsilonGreedy']['LinearScheduler']),
**exp_config['EpsilonGreedy']['config_or_config_dict']))
flow = TrainTestFlow(train_sample_count_func=lambda: get_global_status_collect()('TOTAL_AGENT_TRAIN_SAMPLE_COUNT'),
config_or_config_dict=exp_config['TrainTestFlow']['config_or_config_dict'],
func_dict={
'test': {'func': agent.test,
'args': list(),
'kwargs': dict(sample_count=exp_config['TrainTestFlow']['TEST_SAMPLES_COUNT']),
},
'train': {'func': agent.train,
'args': list(),
'kwargs': dict(),
},
'sample': {'func': agent.sample,
'args': list(),
'kwargs': dict(sample_count=exp_config['TrainTestFlow']['TRAIN_SAMPLES_COUNT'],
env=agent.env,
in_which_status='TRAIN',
store_flag=True),
},
})
experiment = Experiment(
tuner=None,
env=env,
agent=agent,
flow=flow,
name=name
)
experiment.run() | 3,513 | 47.805556 | 119 | py |
baconian-project | baconian-project-master/baconian/benchmark/dqn_benchmark/__init__.py | from baconian.benchmark.dqn_benchmark.acrobot import acrobot_task_fn
from baconian.benchmark.dqn_benchmark.lunarlander import lunarlander_task_fn
| 146 | 48 | 76 | py |
baconian-project | baconian-project-master/baconian/benchmark/dqn_benchmark/acrobot.py | """
DQN benchmark on acrobot
"""
from baconian.benchmark.dqn_benchmark.acrobot_conf import *
from baconian.algo.dqn import DQN
from baconian.core.core import EnvSpec
from baconian.envs.gym_env import make
from baconian.algo.value_func.mlp_q_value import MLPQValueFunction
from baconian.core.agent import Agent
from baconian.algo.misc import EpsilonGreedy
from baconian.core.experiment import Experiment
from baconian.core.flow.train_test_flow import TrainTestFlow
from baconian.config.global_config import GlobalConfig
from baconian.common.schedules import LinearScheduler
from baconian.core.status import get_global_status_collect
def acrobot_task_fn():
exp_config = ACROBOT_BENCHMARK_CONFIG_DICT
GlobalConfig().set('DEFAULT_EXPERIMENT_END_POINT',
exp_config['DEFAULT_EXPERIMENT_END_POINT'])
env = make('Acrobot-v1')
name = 'benchmark'
env_spec = EnvSpec(obs_space=env.observation_space,
action_space=env.action_space)
mlp_q = MLPQValueFunction(env_spec=env_spec,
name_scope=name + '_mlp_q',
name=name + '_mlp_q',
**exp_config['MLPQValueFunction'])
dqn = DQN(env_spec=env_spec,
name=name + '_dqn',
value_func=mlp_q,
**exp_config['DQN'])
agent = Agent(env=env, env_spec=env_spec,
algo=dqn,
name=name + '_agent',
exploration_strategy=EpsilonGreedy(action_space=env_spec.action_space,
prob_scheduler=LinearScheduler(
t_fn=lambda: get_global_status_collect()(
'TOTAL_AGENT_TRAIN_SAMPLE_COUNT'),
**exp_config['EpsilonGreedy']['LinearScheduler']),
**exp_config['EpsilonGreedy']['config_or_config_dict']))
flow = TrainTestFlow(train_sample_count_func=lambda: get_global_status_collect()('TOTAL_AGENT_TRAIN_SAMPLE_COUNT'),
config_or_config_dict=exp_config['TrainTestFlow']['config_or_config_dict'],
func_dict={
'test': {'func': agent.test,
'args': list(),
'kwargs': dict(sample_count=exp_config['TrainTestFlow']['TEST_SAMPLES_COUNT']),
},
'train': {'func': agent.train,
'args': list(),
'kwargs': dict(),
},
'sample': {'func': agent.sample,
'args': list(),
'kwargs': dict(sample_count=exp_config['TrainTestFlow']['TRAIN_SAMPLES_COUNT'],
env=agent.env,
in_which_status='TRAIN',
store_flag=True),
},
})
experiment = Experiment(
tuner=None,
env=env,
agent=agent,
flow=flow,
name=name
)
experiment.run()
| 3,475 | 47.277778 | 119 | py |
baconian-project | baconian-project-master/baconian/benchmark/dqn_benchmark/cartpole_conf.py |
CARTPOLE_BENCHMARK_CONFIG_DICT = {
'env_id': "CartPole-v1",
'MLPQValueFunction': {
'mlp_config': [
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 64,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "2",
"N_UNITS": 64,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "3",
"N_UNITS": 256,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
]
},
'DQN': {
'config_or_config_dict': {
"REPLAY_BUFFER_SIZE": 50000,
"GAMMA": 0.99,
"DECAY": 0,
"BATCH_SIZE": 32,
"TRAIN_ITERATION": 1,
"LEARNING_RATE": 0.001,
"UPDATE_TARGET_Q_FREQUENCY": 1,
},
'replay_buffer': None
},
'EpsilonGreedy': {
'LinearScheduler': {
'initial_p': 1.0,
'final_p': 0.02,
'schedule_timesteps': int(0.1 * 100000)
},
'config_or_config_dict': {
"init_random_prob": 1.0
}
},
'TrainTestFlow': {
"TEST_SAMPLES_COUNT": 3,
"TRAIN_SAMPLES_COUNT": 1,
'config_or_config_dict': {
"TEST_EVERY_SAMPLE_COUNT": 100,
"TRAIN_EVERY_SAMPLE_COUNT": 1,
"START_TRAIN_AFTER_SAMPLE_COUNT": 1000,
"START_TEST_AFTER_SAMPLE_COUNT": 0,
}
},
'DEFAULT_EXPERIMENT_END_POINT': dict(TOTAL_AGENT_TRAIN_SAMPLE_COUNT=100000,
TOTAL_AGENT_TEST_SAMPLE_COUNT=None,
TOTAL_AGENT_UPDATE_COUNT=None),
}
| 2,215 | 27.779221 | 79 | py |
baconian-project | baconian-project-master/baconian/benchmark/dqn_benchmark/lunarlander_conf.py |
LUNARLANDER_BENCHMARK_CONFIG_DICT = {
'env_id': "LunarLander-v2",
'MLPQValueFunction': {
'mlp_config': [
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "1",
"N_UNITS": 64,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "TANH",
"B_INIT_VALUE": 0.0,
"NAME": "2",
"N_UNITS": 64,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "RELU",
"B_INIT_VALUE": 0.0,
"NAME": "3",
"N_UNITS": 256,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
},
{
"ACT": "LINEAR",
"B_INIT_VALUE": 0.0,
"NAME": "OUPTUT",
"N_UNITS": 1,
"TYPE": "DENSE",
"W_NORMAL_STDDEV": 0.03
}
]
},
'DQN': {
'config_or_config_dict': {
"REPLAY_BUFFER_SIZE": 100000,
"GAMMA": 0.99,
"DECAY": 0,
"BATCH_SIZE": 32,
"TRAIN_ITERATION": 1,
"LEARNING_RATE": 0.001,
"UPDATE_TARGET_Q_FREQUENCY": 1,
},
'replay_buffer': None
},
'EpsilonGreedy': {
'LinearScheduler': {
'initial_p': 1.0,
'final_p': 0.05,
'schedule_timesteps': int(0.1 * 200000)
},
'config_or_config_dict': {
"init_random_prob": 1.0
}
},
'TrainTestFlow': {
"TEST_SAMPLES_COUNT": 1,
"TRAIN_SAMPLES_COUNT": 1,
'config_or_config_dict': {
"TEST_EVERY_SAMPLE_COUNT": 100,
"TRAIN_EVERY_SAMPLE_COUNT": 1,
"START_TRAIN_AFTER_SAMPLE_COUNT": 1000,
"START_TEST_AFTER_SAMPLE_COUNT": 0,
}
},
'DEFAULT_EXPERIMENT_END_POINT': dict(TOTAL_AGENT_TRAIN_SAMPLE_COUNT=200000,
TOTAL_AGENT_TEST_SAMPLE_COUNT=None,
TOTAL_AGENT_UPDATE_COUNT=None),
}
| 2,222 | 27.87013 | 79 | py |
baconian-project | baconian-project-master/baconian/algo/ddpg.py | from baconian.core.core import EnvSpec
from baconian.algo.rl_algo import ModelFreeAlgo, OffPolicyAlgo
from baconian.config.dict_config import DictConfig
from baconian.algo.value_func.mlp_q_value import MLPQValueFunction
from baconian.algo.misc.replay_buffer import UniformRandomReplayBuffer, BaseReplayBuffer
import tensorflow as tf
from baconian.common.sampler.sample_data import TransitionData, TrajectoryData
from baconian.tf.tf_parameters import ParametersWithTensorflowVariable
from baconian.config.global_config import GlobalConfig
from baconian.common.special import *
from baconian.algo.policy import DeterministicMLPPolicy
from baconian.tf.util import *
from baconian.common.misc import *
from baconian.common.logging import record_return_decorator
from baconian.core.status import register_counter_info_to_status_decorator
from baconian.algo.misc.placeholder_input import MultiPlaceholderInput
from baconian.tf.util import clip_grad
class DDPG(ModelFreeAlgo, OffPolicyAlgo, MultiPlaceholderInput):
required_key_dict = DictConfig.load_json(file_path=GlobalConfig().DEFAULT_DDPG_REQUIRED_KEY_LIST)
@typechecked
def __init__(self,
env_spec: EnvSpec,
config_or_config_dict: (DictConfig, dict),
value_func: MLPQValueFunction,
policy: DeterministicMLPPolicy,
schedule_param_list=None,
name='ddpg',
replay_buffer=None):
"""
        :param env_spec: environment specification, e.g. its action space and observation space
        :param config_or_config_dict: configuration dictionary (or DictConfig), e.g. learning rates and decay
        :param value_func: Q-value function used as the critic
        :param policy: deterministic policy used as the actor
        :param schedule_param_list: optional list of parameters to be scheduled during training (e.g. initial/final values with a schedule function)
        :param name: name of the algorithm class instance
        :param replay_buffer: replay buffer instance, if any
"""
ModelFreeAlgo.__init__(self, env_spec=env_spec, name=name)
config = construct_dict_config(config_or_config_dict, self)
self.config = config
self.actor = policy
self.target_actor = self.actor.make_copy(name_scope='{}_target_actor'.format(self.name),
name='{}_target_actor'.format(self.name),
reuse=False)
self.critic = value_func
self.target_critic = self.critic.make_copy(name_scope='{}_target_critic'.format(self.name),
name='{}_target_critic'.format(self.name),
reuse=False)
self.state_input = self.actor.state_input
if replay_buffer:
assert issubclass(replay_buffer, BaseReplayBuffer)
self.replay_buffer = replay_buffer
else:
self.replay_buffer = UniformRandomReplayBuffer(limit=self.config('REPLAY_BUFFER_SIZE'),
action_shape=self.env_spec.action_shape,
observation_shape=self.env_spec.obs_shape)
"""
self.parameters contains all the parameters (variables) of the algorithm
"""
self.parameters = ParametersWithTensorflowVariable(tf_var_list=[],
rest_parameters=dict(),
to_scheduler_param_tuple=schedule_param_list,
name='ddpg_param',
source_config=config,
require_snapshot=False)
self._critic_with_actor_output = self.critic.make_copy(reuse=True,
name='actor_input_{}'.format(self.critic.name),
state_input=self.state_input,
action_input=self.actor.action_tensor)
self._target_critic_with_target_actor_output = self.target_critic.make_copy(reuse=True,
name='target_critic_with_target_actor_output_{}'.format(
self.critic.name),
action_input=self.target_actor.action_tensor)
with tf.variable_scope(name):
self.reward_input = tf.placeholder(shape=[None, 1], dtype=tf.float32)
self.next_state_input = tf.placeholder(shape=[None, self.env_spec.flat_obs_dim], dtype=tf.float32)
self.done_input = tf.placeholder(shape=[None, 1], dtype=tf.bool)
self.target_q_input = tf.placeholder(shape=[None, 1], dtype=tf.float32)
done = tf.cast(self.done_input, dtype=tf.float32)
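            # Bellman target for the critic: r + GAMMA * (1 - done) * Q_target(s', mu_target(s'));
            # target_q_input is fed with the target critic evaluated on the target actor's action
            # (see _critic_train below).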
self.predict_q_value = (1. - done) * self.config('GAMMA') * self.target_q_input + self.reward_input
with tf.variable_scope('train'):
self.critic_loss, self.critic_update_op, self.target_critic_update_op, self.critic_optimizer, \
self.critic_grads = self._setup_critic_loss()
self.actor_loss, self.actor_update_op, self.target_actor_update_op, self.action_optimizer, \
self.actor_grads = self._set_up_actor_loss()
var_list = get_tf_collection_var_list(
'{}/train'.format(name)) + self.critic_optimizer.variables() + self.action_optimizer.variables()
self.parameters.set_tf_var_list(tf_var_list=sorted(list(set(var_list)), key=lambda x: x.name))
MultiPlaceholderInput.__init__(self,
sub_placeholder_input_list=[dict(obj=self.target_actor,
attr_name='target_actor',
),
dict(obj=self.actor,
attr_name='actor'),
dict(obj=self.critic,
attr_name='critic'),
dict(obj=self.target_critic,
attr_name='target_critic')
],
parameters=self.parameters)
@register_counter_info_to_status_decorator(increment=1, info_key='init', under_status='INITED')
def init(self, sess=None, source_obj=None):
self.actor.init()
self.critic.init()
self.target_actor.init()
self.target_critic.init(source_obj=self.critic)
self.parameters.init()
if source_obj:
self.copy_from(source_obj)
super().init()
@record_return_decorator(which_recorder='self')
@register_counter_info_to_status_decorator(increment=1, info_key='train', under_status='TRAIN')
def train(self, batch_data=None, train_iter=None, sess=None, update_target=True) -> dict:
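        # Each iteration draws a minibatch (from the replay buffer unless batch_data is given),
        # takes one critic update and one actor update, and optionally soft-updates the targets.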
super(DDPG, self).train()
if isinstance(batch_data, TrajectoryData):
batch_data = batch_data.return_as_transition_data(shuffle_flag=True)
tf_sess = sess if sess else tf.get_default_session()
train_iter = self.parameters("TRAIN_ITERATION") if not train_iter else train_iter
average_critic_loss = 0.0
average_actor_loss = 0.0
for i in range(train_iter):
train_batch = self.replay_buffer.sample(
batch_size=self.parameters('BATCH_SIZE')) if batch_data is None else batch_data
assert isinstance(train_batch, TransitionData)
critic_loss, _ = self._critic_train(train_batch, tf_sess)
actor_loss, _ = self._actor_train(train_batch, tf_sess)
average_actor_loss += actor_loss
average_critic_loss += critic_loss
if update_target:
tf_sess.run([self.target_actor_update_op, self.target_critic_update_op])
return dict(average_actor_loss=average_actor_loss / train_iter,
average_critic_loss=average_critic_loss / train_iter)
def _critic_train(self, batch_data, sess) -> ():
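        # Two steps: (1) run the target critic on (s', mu_target(s')) to get the value used in the
        # TD target; (2) minimize the squared TD error of the online critic on the sampled batch.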
target_q = sess.run(
self._target_critic_with_target_actor_output.q_tensor,
feed_dict={
self._target_critic_with_target_actor_output.state_input: batch_data.new_state_set,
self.target_actor.state_input: batch_data.new_state_set
}
)
loss, _, grads = sess.run(
[self.critic_loss, self.critic_update_op, self.critic_grads
],
feed_dict={
self.target_q_input: target_q,
self.critic.state_input: batch_data.state_set,
self.critic.action_input: batch_data.action_set,
self.done_input: np.reshape(batch_data.done_set, [-1, 1]),
self.reward_input: np.reshape(batch_data.reward_set, [-1, 1]),
**self.parameters.return_tf_parameter_feed_dict()
}
)
return loss, grads
def _actor_train(self, batch_data, sess) -> ():
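        # Deterministic policy gradient step: ascend Q(s, mu(s)) by minimizing the negative mean
        # of the critic evaluated on the actor's own actions (see _set_up_actor_loss).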
target_q, loss, _, grads = sess.run(
[self._critic_with_actor_output.q_tensor, self.actor_loss, self.actor_update_op, self.actor_grads],
feed_dict={
self.actor.state_input: batch_data.state_set,
self._critic_with_actor_output.state_input: batch_data.state_set,
**self.parameters.return_tf_parameter_feed_dict()
}
)
return loss, grads
@register_counter_info_to_status_decorator(increment=1, info_key='test', under_status='TEST')
def test(self, *arg, **kwargs) -> dict:
return super().test(*arg, **kwargs)
def predict(self, obs: np.ndarray, sess=None, batch_flag: bool = False):
tf_sess = sess if sess else tf.get_default_session()
feed_dict = {
self.state_input: make_batch(obs, original_shape=self.env_spec.obs_shape),
**self.parameters.return_tf_parameter_feed_dict()
}
return self.actor.forward(obs=obs, sess=tf_sess, feed_dict=feed_dict)
def append_to_memory(self, samples: TransitionData):
self.replay_buffer.append_batch(obs0=samples.state_set,
obs1=samples.new_state_set,
action=samples.action_set,
reward=samples.reward_set,
terminal1=samples.done_set)
@record_return_decorator(which_recorder='self')
def save(self, global_step, save_path=None, name=None, **kwargs):
save_path = save_path if save_path else GlobalConfig().DEFAULT_MODEL_CHECKPOINT_PATH
name = name if name else self.name
MultiPlaceholderInput.save(self, save_path=save_path, global_step=global_step, name=name, **kwargs)
return dict(check_point_save_path=save_path, check_point_save_global_step=global_step,
check_point_save_name=name)
@record_return_decorator(which_recorder='self')
def load(self, path_to_model, model_name, global_step=None, **kwargs):
MultiPlaceholderInput.load(self, path_to_model, model_name, global_step, **kwargs)
return dict(check_point_load_path=path_to_model, check_point_load_global_step=global_step,
check_point_load_name=model_name)
def _setup_critic_loss(self):
reg_loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope=self.critic.name_scope)
loss = tf.reduce_sum((self.predict_q_value - self.critic.q_tensor) ** 2)
if len(reg_loss) > 0:
loss += tf.reduce_sum(reg_loss)
optimizer = tf.train.AdamOptimizer(learning_rate=self.parameters('CRITIC_LEARNING_RATE'))
grad_var_pair = optimizer.compute_gradients(loss=loss, var_list=self.critic.parameters('tf_var_list'))
grads = [g[0] for g in grad_var_pair]
if self.parameters('critic_clip_norm') is not None:
grad_var_pair, grads = clip_grad(optimizer=optimizer,
loss=loss,
var_list=self.critic.parameters('tf_var_list'),
clip_norm=self.parameters('critic_clip_norm'))
optimize_op = optimizer.apply_gradients(grad_var_pair)
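        # soft (Polyak-averaged) target update: theta_target <- DECAY * theta_target + (1 - DECAY) * theta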
op = []
for var, target_var in zip(self.critic.parameters('tf_var_list'),
self.target_critic.parameters('tf_var_list')):
ref_val = self.parameters('DECAY') * target_var + (1.0 - self.parameters('DECAY')) * var
op.append(tf.assign(target_var, ref_val))
return loss, optimize_op, op, optimizer, grads
def _set_up_actor_loss(self):
reg_loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope=self.actor.name_scope)
loss = -tf.reduce_mean(self._critic_with_actor_output.q_tensor)
if len(reg_loss) > 0:
loss += tf.reduce_sum(reg_loss)
        optimizer = tf.train.AdamOptimizer(learning_rate=self.parameters('ACTOR_LEARNING_RATE'))
grad_var_pair = optimizer.compute_gradients(loss=loss, var_list=self.actor.parameters('tf_var_list'))
grads = [g[0] for g in grad_var_pair]
if self.parameters('actor_clip_norm') is not None:
grad_var_pair, grads = clip_grad(optimizer=optimizer,
loss=loss,
var_list=self.actor.parameters('tf_var_list'),
                                             clip_norm=self.parameters('actor_clip_norm'))
optimize_op = optimizer.apply_gradients(grad_var_pair)
op = []
for var, target_var in zip(self.actor.parameters('tf_var_list'),
self.target_actor.parameters('tf_var_list')):
ref_val = self.parameters('DECAY') * target_var + (1.0 - self.parameters('DECAY')) * var
op.append(tf.assign(target_var, ref_val))
return loss, optimize_op, op, optimizer, grads
    # todo: document the API with usage examples and known limitations
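# A minimal, hypothetical usage sketch for the todo above; it is not part of the library.
# It assumes `env`, `env_spec` and a fully constructed `ddpg` instance (actor, critic and
# config built elsewhere) plus an active tf session, and only calls methods defined in this file.
def _example_ddpg_loop(env, env_spec, ddpg, n_steps=100):
    from baconian.common.sampler.sample_data import TransitionData
    ddpg.init()
    batch = TransitionData(env_spec=env_spec)
    obs = env.reset()
    for _ in range(n_steps):
        # depending on the actor, the returned action may need reshaping before env.step
        act = ddpg.predict(obs=obs)
        new_obs, reward, done, _ = env.step(act)
        batch.append(state=obs, action=act, new_state=new_obs, reward=reward, done=done)
        obs = env.reset() if done else new_obs
    ddpg.append_to_memory(batch)  # fill the replay buffer with the collected transitions
    # train() samples minibatches from the replay buffer, so at least BATCH_SIZE
    # transitions must have been appended before calling it
    return ddpg.train()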
| 14,755 | 55.320611 | 140 | py |
baconian-project | baconian-project-master/baconian/algo/mbmf.py | """
Model-based Model-free fine tune
"""
| 41 | 9.5 | 32 | py |
baconian-project | baconian-project-master/baconian/algo/algo.py | from baconian.core.core import Basic, EnvSpec, Env
from baconian.core.status import StatusWithSubInfo
import abc
from typeguard import typechecked
from baconian.common.logging import Recorder
from baconian.core.parameters import Parameters
from baconian.common.sampler.sample_data import TrajectoryData
class Algo(Basic):
"""
Abstract class for algorithms
"""
STATUS_LIST = ['CREATED', 'INITED', 'TRAIN', 'TEST']
INIT_STATUS = 'CREATED'
@typechecked
def __init__(self, env_spec: EnvSpec, name: str = 'algo', warm_up_trajectories_number=0):
"""
Constructor
:param env_spec: environment specifications
:type env_spec: EnvSpec
:param name: name of the algorithm
:type name: str
:param warm_up_trajectories_number: how many trajectories used to warm up the training
:type warm_up_trajectories_number: int
"""
super().__init__(status=StatusWithSubInfo(obj=self), name=name)
self.env_spec = env_spec
self.parameters = Parameters(dict())
self.recorder = Recorder(default_obj=self)
self.warm_up_trajectories_number = warm_up_trajectories_number
def init(self):
"""
Initialization method, such as network random initialization in Tensorflow
:return:
"""
self._status.set_status('INITED')
def warm_up(self, trajectory_data: TrajectoryData):
"""
Use some data to warm up the algorithm, e.g., compute the mean/std-dev of the state to perform normalization.
Data used in warm up process will not be added into the memory
:param trajectory_data: TrajectoryData object
:type trajectory_data: TrajectoryData
:return: None
"""
pass
def train(self, *arg, **kwargs) -> dict:
"""
Training API, specific arguments should be defined by each algorithms itself.
:return: training results, e.g., loss
:rtype: dict
"""
self._status.set_status('TRAIN')
return dict()
def test(self, *arg, **kwargs) -> dict:
"""
Testing API, most of the evaluation can be done by agent instead of algorithms, so this API can be skipped
:return: test results, e.g., rewards
:rtype: dict
"""
self._status.set_status('TEST')
return dict()
@abc.abstractmethod
def predict(self, *arg, **kwargs):
"""
Predict function, given the obs as input, return the action, obs will be read as the first argument passed into
this API, like algo.predict(obs=x, ...)
:return: predicted action
:rtype: np.ndarray
"""
raise NotImplementedError
@abc.abstractmethod
def append_to_memory(self, *args, **kwargs):
"""
For off-policy algorithm, use this API to append the data into replay buffer. samples will be read as the first
argument passed into this API, like algo.append_to_memory(samples=x, ...)
"""
raise NotImplementedError
@property
def is_training(self):
"""
A boolean indicate the if the algorithm is in training status
:return: True if in training
:rtype: bool
"""
return self.get_status()['status'] == 'TRAIN'
@property
def is_testing(self):
"""
A boolean indicate the if the algorithm is in training status
:return: True if in testing
:rtype: bool
"""
return self.get_status()['status'] == 'TEST'
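# A minimal, hypothetical subclass sketch (not part of the library) illustrating the two
# abstract methods every concrete algorithm has to provide.
class _RandomAlgoExample(Algo):
    """Pick uniformly random actions; only meant to illustrate the Algo interface."""

    def predict(self, obs, **kwargs):
        # obs is ignored here; a real algorithm would map the observation to an action
        return self.env_spec.action_space.sample()

    def append_to_memory(self, samples, **kwargs):
        # a memoryless random policy has nothing to store
        pass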
| 3,560 | 29.698276 | 119 | py |
baconian-project | baconian-project-master/baconian/algo/ppo.py | from baconian.core.core import EnvSpec
from baconian.algo.rl_algo import ModelFreeAlgo, OnPolicyAlgo
from baconian.config.dict_config import DictConfig
import tensorflow as tf
from baconian.algo.distribution.mvn import kl, entropy, log_prob
from baconian.common.sampler.sample_data import TrajectoryData, TransitionData, SampleData
from baconian.tf.tf_parameters import ParametersWithTensorflowVariable
from baconian.config.global_config import GlobalConfig
from baconian.algo.policy.policy import StochasticPolicy
from baconian.algo.value_func import VValueFunction
from baconian.tf.util import *
from baconian.common.misc import *
from baconian.algo.misc import SampleProcessor
from baconian.common.logging import record_return_decorator
from baconian.core.status import register_counter_info_to_status_decorator
from baconian.algo.misc.placeholder_input import MultiPlaceholderInput, PlaceholderInput
from baconian.common.error import *
from baconian.common.data_pre_processing import RunningStandardScaler
from baconian.common.special import *
class PPO(ModelFreeAlgo, OnPolicyAlgo, MultiPlaceholderInput):
required_key_dict = DictConfig.load_json(file_path=GlobalConfig().DEFAULT_PPO_REQUIRED_KEY_LIST)
@typechecked
def __init__(self, env_spec: EnvSpec,
stochastic_policy: StochasticPolicy,
config_or_config_dict: (DictConfig, dict),
value_func: VValueFunction,
warm_up_trajectories_number=5,
use_time_index_flag=False,
name='ppo'):
ModelFreeAlgo.__init__(self, env_spec=env_spec,
name=name,
warm_up_trajectories_number=warm_up_trajectories_number)
self.use_time_index_flag = use_time_index_flag
self.config = construct_dict_config(config_or_config_dict, self)
self.policy = stochastic_policy
self.value_func = value_func
to_ph_parameter_dict = dict()
self.trajectory_memory = TrajectoryData(env_spec=env_spec)
self.transition_data_for_trajectory = TransitionData(env_spec=env_spec)
self.value_func_train_data_buffer = None
self.scaler = RunningStandardScaler(dims=self.env_spec.flat_obs_dim)
if use_time_index_flag:
scale_last_time_index_mean = self.scaler._mean
scale_last_time_index_mean[-1] = 0
scale_last_time_index_var = self.scaler._var
scale_last_time_index_var[-1] = 1000 * 1000
self.scaler.set_param(mean=scale_last_time_index_mean, var=scale_last_time_index_var)
with tf.variable_scope(name):
self.advantages_ph = tf.placeholder(tf.float32, (None,), 'advantages')
self.v_func_val_ph = tf.placeholder(tf.float32, (None,), 'val_val_func')
dist_info_list = self.policy.get_dist_info()
self.old_dist_tensor = [
(tf.placeholder(**dict(dtype=dist_info['dtype'],
shape=dist_info['shape'],
name=dist_info['name'])), dist_info['name']) for dist_info in
dist_info_list
]
self.old_policy = self.policy.make_copy(reuse=False,
name_scope='old_{}'.format(self.policy.name),
name='old_{}'.format(self.policy.name),
distribution_tensors_tuple=tuple(self.old_dist_tensor))
to_ph_parameter_dict['beta'] = tf.placeholder(tf.float32, (), 'beta')
to_ph_parameter_dict['eta'] = tf.placeholder(tf.float32, (), 'eta')
to_ph_parameter_dict['kl_target'] = tf.placeholder(tf.float32, (), 'kl_target')
to_ph_parameter_dict['lr_multiplier'] = tf.placeholder(tf.float32, (), 'lr_multiplier')
self.parameters = ParametersWithTensorflowVariable(tf_var_list=[],
rest_parameters=dict(
advantages_ph=self.advantages_ph,
v_func_val_ph=self.v_func_val_ph,
),
to_ph_parameter_dict=to_ph_parameter_dict,
name='ppo_param',
save_rest_param_flag=False,
source_config=self.config,
require_snapshot=False)
with tf.variable_scope(name):
with tf.variable_scope('train'):
self.kl = tf.reduce_mean(self.old_policy.kl(self.policy))
self.average_entropy = tf.reduce_mean(self.policy.entropy())
self.policy_loss, self.policy_optimizer, self.policy_update_op = self._setup_policy_loss()
self.value_func_loss, self.value_func_optimizer, self.value_func_update_op = self._setup_value_func_loss()
var_list = get_tf_collection_var_list(
'{}/train'.format(name)) + self.policy_optimizer.variables() + self.value_func_optimizer.variables()
self.parameters.set_tf_var_list(tf_var_list=sorted(list(set(var_list)), key=lambda x: x.name))
MultiPlaceholderInput.__init__(self,
sub_placeholder_input_list=[dict(obj=self.value_func,
attr_name='value_func',
),
dict(obj=self.policy,
attr_name='policy')],
parameters=self.parameters)
def warm_up(self, trajectory_data: TrajectoryData):
for traj in trajectory_data.trajectories:
self.scaler.update_scaler(data=traj.state_set)
if self.use_time_index_flag:
scale_last_time_index_mean = self.scaler._mean
scale_last_time_index_mean[-1] = 0
scale_last_time_index_var = self.scaler._var
scale_last_time_index_var[-1] = 1000 * 1000
self.scaler.set_param(mean=scale_last_time_index_mean, var=scale_last_time_index_var)
@register_counter_info_to_status_decorator(increment=1, info_key='init', under_status='INITED')
def init(self, sess=None, source_obj=None):
self.policy.init()
self.value_func.init()
self.parameters.init()
if source_obj:
self.copy_from(source_obj)
super().init()
@record_return_decorator(which_recorder='self')
@register_counter_info_to_status_decorator(increment=1, info_key='train', under_status='TRAIN')
def train(self, trajectory_data: TrajectoryData = None, train_iter=None, sess=None) -> dict:
super(PPO, self).train()
if trajectory_data is None:
trajectory_data = self.trajectory_memory
if len(trajectory_data) == 0:
raise MemoryBufferLessThanBatchSizeError('not enough trajectory data')
for i, traj in enumerate(trajectory_data.trajectories):
trajectory_data.trajectories[i].append_new_set(name='state_set',
shape=self.env_spec.obs_shape,
data_set=np.reshape(
np.array(self.scaler.process(np.array(traj.state_set))),
[-1] + list(self.env_spec.obs_shape)))
trajectory_data.trajectories[i].append_new_set(name='new_state_set',
shape=self.env_spec.obs_shape,
data_set=np.reshape(
np.array(
self.scaler.process(np.array(traj.new_state_set))),
[-1] + list(self.env_spec.obs_shape)))
tf_sess = sess if sess else tf.get_default_session()
SampleProcessor.add_estimated_v_value(trajectory_data, value_func=self.value_func)
SampleProcessor.add_discount_sum_reward(trajectory_data,
gamma=self.parameters('gamma'))
SampleProcessor.add_gae(trajectory_data,
gamma=self.parameters('gamma'),
name='advantage_set',
lam=self.parameters('lam'),
value_func=self.value_func)
trajectory_data = SampleProcessor.normalization(trajectory_data, key='advantage_set')
policy_res_dict = self._update_policy(
state_set=np.concatenate([t('state_set') for t in trajectory_data.trajectories], axis=0),
action_set=np.concatenate([t('action_set') for t in trajectory_data.trajectories], axis=0),
advantage_set=np.concatenate([t('advantage_set') for t in trajectory_data.trajectories], axis=0),
train_iter=train_iter if train_iter else self.parameters(
'policy_train_iter'),
sess=tf_sess)
value_func_res_dict = self._update_value_func(
state_set=np.concatenate([t('state_set') for t in trajectory_data.trajectories], axis=0),
discount_set=np.concatenate([t('discount_set') for t in trajectory_data.trajectories], axis=0),
train_iter=train_iter if train_iter else self.parameters(
'value_func_train_iter'),
sess=tf_sess)
trajectory_data.reset()
self.trajectory_memory.reset()
return {
**policy_res_dict,
**value_func_res_dict
}
@register_counter_info_to_status_decorator(increment=1, info_key='test', under_status='TEST')
def test(self, *arg, **kwargs) -> dict:
return super().test(*arg, **kwargs)
@register_counter_info_to_status_decorator(increment=1, info_key='predict')
def predict(self, obs: np.ndarray, sess=None, batch_flag: bool = False):
tf_sess = sess if sess else tf.get_default_session()
ac = self.policy.forward(obs=self.scaler.process(data=make_batch(obs, original_shape=self.env_spec.obs_shape)),
sess=tf_sess,
feed_dict=self.parameters.return_tf_parameter_feed_dict())
return ac
def append_to_memory(self, samples: TrajectoryData):
        # todo: how to make sure the data is kept time-sequential
obs_list = samples.trajectories[0].state_set
for i in range(1, len(samples.trajectories)):
obs_list = np.array(np.concatenate([obs_list, samples.trajectories[i].state_set], axis=0))
self.trajectory_memory.union(samples)
self.scaler.update_scaler(data=np.array(obs_list))
if self.use_time_index_flag:
scale_last_time_index_mean = self.scaler._mean
scale_last_time_index_mean[-1] = 0
scale_last_time_index_var = self.scaler._var
scale_last_time_index_var[-1] = 1000 * 1000
self.scaler.set_param(mean=scale_last_time_index_mean, var=scale_last_time_index_var)
@record_return_decorator(which_recorder='self')
def save(self, global_step, save_path=None, name=None, **kwargs):
save_path = save_path if save_path else GlobalConfig().DEFAULT_MODEL_CHECKPOINT_PATH
name = name if name else self.name
MultiPlaceholderInput.save(self, save_path=save_path, global_step=global_step, name=name, **kwargs)
return dict(check_point_save_path=save_path, check_point_save_global_step=global_step,
check_point_save_name=name)
@record_return_decorator(which_recorder='self')
def load(self, path_to_model, model_name, global_step=None, **kwargs):
MultiPlaceholderInput.load(self, path_to_model, model_name, global_step, **kwargs)
return dict(check_point_load_path=path_to_model, check_point_load_global_step=global_step,
check_point_load_name=model_name)
def _setup_policy_loss(self):
"""
Code clip from pat-cody
Three loss terms:
1) standard policy gradient
2) D_KL(pi_old || pi_new)
3) Hinge loss on [D_KL - kl_targ]^2
See: https://arxiv.org/pdf/1707.02286.pdf
"""
if self.parameters('clipping_range') is not None:
pg_ratio = tf.exp(self.policy.log_prob() - self.old_policy.log_prob())
clipped_pg_ratio = tf.clip_by_value(pg_ratio, 1 - self.parameters('clipping_range')[0],
1 + self.parameters('clipping_range')[1])
surrogate_loss = tf.minimum(self.advantages_ph * pg_ratio,
self.advantages_ph * clipped_pg_ratio)
loss = -tf.reduce_mean(surrogate_loss)
else:
loss1 = -tf.reduce_mean(self.advantages_ph *
tf.exp(self.policy.log_prob() - self.old_policy.log_prob()))
loss2 = tf.reduce_mean(self.parameters('beta') * self.kl)
loss3 = self.parameters('eta') * tf.square(
tf.maximum(0.0, self.kl - 2.0 * self.parameters('kl_target')))
loss = loss1 + loss2 + loss3
self.loss1 = loss1
self.loss2 = loss2
self.loss3 = loss3
if isinstance(self.policy, PlaceholderInput):
reg_list = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope=self.policy.name_scope)
if len(reg_list) > 0:
reg_loss = tf.reduce_sum(reg_list)
loss += reg_loss
optimizer = tf.train.AdamOptimizer(
learning_rate=self.parameters('policy_lr') * self.parameters('lr_multiplier'))
train_op = optimizer.minimize(loss, var_list=self.policy.parameters('tf_var_list'))
return loss, optimizer, train_op
def _setup_value_func_loss(self):
# todo update the value_func design
loss = tf.reduce_mean(tf.square(tf.squeeze(self.value_func.v_tensor) - self.v_func_val_ph))
reg_loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope=self.value_func.name_scope)
if len(reg_loss) > 0:
loss += tf.reduce_sum(reg_loss)
optimizer = tf.train.AdamOptimizer(self.parameters('value_func_lr'))
train_op = optimizer.minimize(loss, var_list=self.value_func.parameters('tf_var_list'))
return loss, optimizer, train_op
def _update_policy(self, state_set, action_set, advantage_set, train_iter, sess):
old_policy_feed_dict = dict()
res = sess.run([getattr(self.policy, tensor[1]) for tensor in self.old_dist_tensor],
feed_dict={
self.policy.parameters('state_input'): state_set,
self.policy.parameters('action_input'): action_set,
**self.parameters.return_tf_parameter_feed_dict()
})
for tensor, val in zip(self.old_dist_tensor, res):
old_policy_feed_dict[tensor[0]] = val
feed_dict = {
self.policy.parameters('action_input'): action_set,
self.old_policy.parameters('action_input'): action_set,
self.policy.parameters('state_input'): state_set,
self.advantages_ph: advantage_set,
**self.parameters.return_tf_parameter_feed_dict(),
**old_policy_feed_dict
}
average_loss, average_kl, average_entropy = 0.0, 0.0, 0.0
total_epoch = 0
kl = None
for i in range(train_iter):
_ = sess.run(self.policy_update_op, feed_dict=feed_dict)
loss, kl, entropy = sess.run(
[self.policy_loss, self.kl, self.average_entropy],
feed_dict=feed_dict)
average_loss += loss
average_kl += kl
average_entropy += entropy
total_epoch = i + 1
if kl > self.parameters('kl_target', require_true_value=True) * 4:
# early stopping if D_KL diverges badly
break
average_loss, average_kl, average_entropy = average_loss / total_epoch, average_kl / total_epoch, average_entropy / total_epoch
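        # Servo the KL penalty: if the realised KL overshoots the target, raise beta (and
        # possibly shrink the learning-rate multiplier); if it undershoots, relax both.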
if kl > self.parameters('kl_target', require_true_value=True) * 2: # servo beta to reach D_KL target
self.parameters.set(key='beta',
new_val=np.minimum(35, 1.5 * self.parameters('beta', require_true_value=True)))
if self.parameters('beta', require_true_value=True) > 30 and self.parameters('lr_multiplier',
require_true_value=True) > 0.1:
self.parameters.set(key='lr_multiplier',
new_val=self.parameters('lr_multiplier', require_true_value=True) / 1.5)
elif kl < self.parameters('kl_target', require_true_value=True) / 2:
self.parameters.set(key='beta',
new_val=np.maximum(1 / 35, self.parameters('beta', require_true_value=True) / 1.5))
if self.parameters('beta', require_true_value=True) < (1 / 30) and self.parameters('lr_multiplier',
require_true_value=True) < 10:
self.parameters.set(key='lr_multiplier',
new_val=self.parameters('lr_multiplier', require_true_value=True) * 1.5)
return dict(
policy_average_loss=average_loss,
policy_average_kl=average_kl,
policy_average_entropy=average_entropy,
policy_total_train_epoch=total_epoch
)
def _update_value_func(self, state_set, discount_set, train_iter, sess):
y_hat = self.value_func.forward(obs=state_set).squeeze()
old_exp_var = 1 - np.var(discount_set - y_hat) / np.var(discount_set)
if self.value_func_train_data_buffer is None:
self.value_func_train_data_buffer = (state_set, discount_set)
else:
self.value_func_train_data_buffer = (
np.concatenate([self.value_func_train_data_buffer[0], state_set], axis=0),
np.concatenate([self.value_func_train_data_buffer[1], discount_set], axis=0))
if len(self.value_func_train_data_buffer[0]) > self.parameters('value_func_memory_size'):
self.value_func_train_data_buffer = tuple(
np.array(data[-self.parameters('value_func_memory_size'):]) for data in
self.value_func_train_data_buffer)
state_set_all, discount_set_all = self.value_func_train_data_buffer
param_dict = self.parameters.return_tf_parameter_feed_dict()
for i in range(train_iter):
random_index = np.random.choice(np.arange(len(state_set_all)), len(state_set_all))
state_set_all = state_set_all[random_index]
discount_set_all = discount_set_all[random_index]
for index in range(0,
len(state_set_all) - self.parameters('value_func_train_batch_size'),
self.parameters('value_func_train_batch_size')):
state = np.array(state_set_all[
index: index + self.parameters(
'value_func_train_batch_size')])
discount = discount_set_all[index: index + self.parameters(
'value_func_train_batch_size')]
loss, _ = sess.run([self.value_func_loss, self.value_func_update_op],
options=tf.RunOptions(report_tensor_allocations_upon_oom=True),
feed_dict={
self.value_func.state_input: state,
self.v_func_val_ph: discount,
**param_dict
})
y_hat = self.value_func.forward(obs=state_set).squeeze()
loss = np.mean(np.square(y_hat - discount_set))
exp_var = 1 - np.var(discount_set - y_hat) / np.var(discount_set)
return dict(
value_func_loss=loss,
value_func_policy_exp_var=exp_var,
value_func_policy_old_exp_var=old_exp_var
)
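# A minimal, hypothetical on-policy loop (not part of the library). It assumes `env`,
# `env_spec` and a constructed, already initialised `ppo` instance plus an active tf
# session, and only uses methods defined above; `horizon` is an arbitrary rollout length.
def _example_ppo_iteration(env, env_spec, ppo, horizon=200):
    from baconian.common.sampler.sample_data import TransitionData, TrajectoryData
    traj = TrajectoryData(env_spec=env_spec)
    path = TransitionData(env_spec=env_spec)
    obs = env.reset()
    for _ in range(horizon):
        # depending on the policy, the returned action may need reshaping before env.step
        act = ppo.predict(obs=obs)
        new_obs, reward, done, _ = env.step(act)
        path.append(state=obs, action=act, new_state=new_obs, reward=reward, done=done)
        obs = new_obs
        if done:
            break
    traj.append(path)
    ppo.append_to_memory(traj)  # also updates the running observation scaler
    return ppo.train()          # consumes and then resets the internal trajectory memory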
| 20,940 | 57.169444 | 135 | py |
baconian-project | baconian-project-master/baconian/algo/utils.py | def _get_copy_arg_with_tf_reuse(obj, kwargs: dict):
# kwargs = deepcopy(kwargs)
if 'reuse' in kwargs:
if kwargs['reuse'] is True:
if 'name_scope' in kwargs and kwargs['name_scope'] != obj.name_scope:
raise ValueError('If reuse, the name scope should be same instead of : {} and {}'.format(
kwargs['name_scope'], obj.name_scope))
else:
kwargs.update(name_scope=obj.name_scope)
else:
if 'name_scope' in kwargs and kwargs['name_scope'] == obj.name:
raise ValueError(
'If not reuse, the name scope should be different instead of: {} and {}'.format(
kwargs['name_scope'], obj.name_scope))
return kwargs
| 774 | 47.4375 | 105 | py |
baconian-project | baconian-project-master/baconian/algo/model_ensemble.py | from baconian.algo.dynamics.dynamics_model import DynamicsModel
from typeguard import typechecked
from baconian.algo.dynamics.mlp_dynamics_model import ContinuousMLPGlobalDynamicsModel
from baconian.config.dict_config import DictConfig
from baconian.common.sampler.sample_data import TransitionData
from baconian.core.ensemble import ModelEnsemble
from baconian.core.parameters import Parameters
from baconian.config.global_config import GlobalConfig
from baconian.algo.rl_algo import ModelFreeAlgo, ModelBasedAlgo
from baconian.common.misc import *
from baconian.common.logging import record_return_decorator
from baconian.core.status import register_counter_info_to_status_decorator
from baconian.core.util import init_func_arg_record_decorator
from baconian.algo.misc.placeholder_input import PlaceholderInput
import os
from baconian.core.agent import Agent
class ModelEnsembleAlgo(ModelBasedAlgo):
"""
Model Ensemble method, with any compatible built-in model-free methods.
Kurutach, Thanard, et al. "Model-ensemble trust-region policy optimization." arXiv preprint arXiv:1802.10592 (2018).
"""
required_key_dict = DictConfig.load_json(file_path=GlobalConfig().DEFAULT_ALGO_ME_REQUIRED_KEY_LIST)
@init_func_arg_record_decorator()
@typechecked
def __init__(self, env_spec, dynamics_model: ModelEnsemble,
model_free_algo: ModelFreeAlgo,
config_or_config_dict: (DictConfig, dict),
name='model_ensemble'
):
if not isinstance(dynamics_model.model[0], ContinuousMLPGlobalDynamicsModel):
raise TypeError("Model ensemble elements should be of type ContinuousMLPGlobalDynamicsModel")
super().__init__(env_spec, dynamics_model, name)
config = construct_dict_config(config_or_config_dict, self)
parameters = Parameters(parameters=dict(),
name='dyna_param',
source_config=config)
sub_placeholder_input_list = []
if isinstance(dynamics_model, PlaceholderInput):
sub_placeholder_input_list.append(dict(obj=dynamics_model,
attr_name='dynamics_model'))
if isinstance(model_free_algo, PlaceholderInput):
sub_placeholder_input_list.append(dict(obj=model_free_algo,
attr_name='model_free_algo'))
self.model_free_algo = model_free_algo
self.config = config
self.parameters = parameters
        self.result = [0] * len(dynamics_model)
self.validation_result = [0] * len(dynamics_model)
self._dynamics_model.__class__ = ModelEnsemble
@register_counter_info_to_status_decorator(increment=1, info_key='init', under_status='INITED')
def init(self):
self.parameters.init()
self.model_free_algo.init()
self.dynamics_env.init()
super().init()
@record_return_decorator(which_recorder='self')
@register_counter_info_to_status_decorator(increment=1, info_key='train_counter', under_status='TRAIN')
def train(self, *args, **kwargs) -> dict:
super().train()
res_dict = {}
batch_data = kwargs['batch_data'] if 'batch_data' in kwargs else None
if 'state' in kwargs:
assert kwargs['state'] in ('state_dynamics_training', 'state_agent_training')
state = kwargs['state']
kwargs.pop('state')
else:
state = None
if not state or state == 'state_dynamics_training':
dynamics_train_res_dict = self._fit_dynamics_model(batch_data=batch_data,
train_iter=self.parameters('dynamics_model_train_iter'))
for key, val in dynamics_train_res_dict.items():
res_dict["{}_{}".format(self._dynamics_model.name, key)] = val
if not state or state == 'state_agent_training':
model_free_algo_train_res_dict = self._train_model_free_algo(batch_data=batch_data,
train_iter=self.parameters(
'model_free_algo_train_iter'))
for key, val in model_free_algo_train_res_dict.items():
res_dict['{}_{}'.format(self.model_free_algo.name, key)] = val
return res_dict
@register_counter_info_to_status_decorator(increment=1, info_key='test_counter', under_status='TEST')
def test(self, *arg, **kwargs):
return super().test(*arg, **kwargs)
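    # ME-TRPO-style validation: roll out the current policy inside every ensemble member
    # and report the fraction of members whose simulated return improved since last call.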
def validate(self, agent: Agent, *args, **kwargs):
        old_result = list(self.result)  # copy, so the comparison below is against the previous round
self.validation_result = 0
for a in range(len(self._dynamics_model)):
individual_model = self._dynamics_model.model[a]
env = individual_model.return_as_env()
batch_data = agent.sample(env=env,
sample_count=self.parameters('validation_trajectory_count'),
sample_type='trajectory',
store_flag=False)
self.result[a] = batch_data.get_mean_of('reward')
if self.result[a] > old_result[a]:
self.validation_result += 1
self.validation_result = self.validation_result / len(self._dynamics_model)
return self.validation_result
@register_counter_info_to_status_decorator(increment=1, info_key='predict_counter')
def predict(self, obs, **kwargs):
return self.model_free_algo.predict(obs)
def append_to_memory(self, *args, **kwargs):
self.model_free_algo.append_to_memory(kwargs['samples'])
@record_return_decorator(which_recorder='self')
def save(self, global_step, save_path=None, name=None, **kwargs):
save_path = save_path if save_path else GlobalConfig().DEFAULT_MODEL_CHECKPOINT_PATH
name = name if name else self.name
self.model_free_algo.save(global_step=global_step,
name=None,
save_path=os.path.join(save_path, self.model_free_algo.name))
self.dynamics_env.save(global_step=global_step,
name=None,
save_path=os.path.join(save_path, self.dynamics_env.name))
return dict(check_point_save_path=save_path, check_point_save_global_step=global_step,
check_point_save_name=name)
@record_return_decorator(which_recorder='self')
def load(self, path_to_model, model_name, global_step=None, **kwargs):
self.model_free_algo.load(path_to_model=os.path.join(path_to_model, self.model_free_algo.name),
model_name=self.model_free_algo.name,
global_step=global_step)
self.dynamics_env.load(global_step=global_step,
path_to_model=os.path.join(path_to_model, self.dynamics_env.name),
model_name=self.dynamics_env.name)
return dict(check_point_load_path=path_to_model, check_point_load_global_step=global_step,
check_point_load_name=model_name)
@register_counter_info_to_status_decorator(increment=1, info_key='dyanmics_train_counter', under_status='TRAIN')
def _fit_dynamics_model(self, batch_data: TransitionData, train_iter, sess=None) -> dict:
res_dict = self._dynamics_model.train(batch_data, **dict(sess=sess,
train_iter=train_iter))
return res_dict
@register_counter_info_to_status_decorator(increment=1, info_key='mode_free_algo_dyanmics_train_counter',
under_status='TRAIN')
def _train_model_free_algo(self, batch_data=None, train_iter=None, sess=None):
res_dict = self.model_free_algo.train(**dict(batch_data=batch_data,
train_iter=train_iter,
sess=sess))
return res_dict
| 8,184 | 50.803797 | 120 | py |
baconian-project | baconian-project-master/baconian/algo/dqn.py | from baconian.common.special import flatten_n
from baconian.algo.rl_algo import ModelFreeAlgo, OffPolicyAlgo
from baconian.config.dict_config import DictConfig
from typeguard import typechecked
from baconian.core.util import init_func_arg_record_decorator
from baconian.tf.util import *
from baconian.algo.misc.replay_buffer import UniformRandomReplayBuffer, BaseReplayBuffer, PrioritisedReplayBuffer
import tensorflow as tf
import numpy as np
from baconian.common.sampler.sample_data import TransitionData
from baconian.tf.tf_parameters import ParametersWithTensorflowVariable
from baconian.config.global_config import GlobalConfig
from baconian.common.misc import *
from baconian.algo.value_func.mlp_q_value import MLPQValueFunction
from baconian.common.logging import record_return_decorator
from baconian.core.status import register_counter_info_to_status_decorator
from baconian.algo.misc.placeholder_input import MultiPlaceholderInput
from baconian.common.error import *
class DQN(ModelFreeAlgo, OffPolicyAlgo, MultiPlaceholderInput):
required_key_dict = DictConfig.load_json(file_path=GlobalConfig().DEFAULT_DQN_REQUIRED_KEY_LIST)
@init_func_arg_record_decorator()
@typechecked
def __init__(self,
env_spec,
config_or_config_dict: (DictConfig, dict),
value_func: MLPQValueFunction,
schedule_param_list=None,
name: str = 'dqn',
replay_buffer=None):
ModelFreeAlgo.__init__(self, env_spec=env_spec, name=name)
self.config = construct_dict_config(config_or_config_dict, self)
if replay_buffer:
assert issubclass(replay_buffer, BaseReplayBuffer)
self.replay_buffer = replay_buffer
else:
self.replay_buffer = UniformRandomReplayBuffer(limit=self.config('REPLAY_BUFFER_SIZE'),
action_shape=self.env_spec.action_shape,
observation_shape=self.env_spec.obs_shape)
self.q_value_func = value_func
self.state_input = self.q_value_func.state_input
self.action_input = self.q_value_func.action_input
self.update_target_q_every_train = self.config('UPDATE_TARGET_Q_FREQUENCY') if 'UPDATE_TARGET_Q_FREQUENCY' in \
self.config.config_dict else 1
self.parameters = ParametersWithTensorflowVariable(tf_var_list=[],
rest_parameters=dict(),
to_scheduler_param_tuple=schedule_param_list,
name='{}_param'.format(name),
source_config=self.config,
require_snapshot=False)
with tf.variable_scope(name):
self.reward_input = tf.placeholder(shape=[None, 1], dtype=tf.float32)
self.next_state_input = tf.placeholder(shape=[None, self.env_spec.flat_obs_dim], dtype=tf.float32)
self.done_input = tf.placeholder(shape=[None, 1], dtype=tf.bool)
self.target_q_input = tf.placeholder(shape=[None, 1], dtype=tf.float32)
done = tf.cast(self.done_input, dtype=tf.float32)
self.target_q_value_func = self.q_value_func.make_copy(name_scope='{}_targe_q_value_net'.format(name),
name='{}_targe_q_value_net'.format(name),
reuse=False)
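            # one-step Bellman target: r + GAMMA * max_a' Q_target(s', a'), masked to r on terminal transitions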
self.predict_q_value = (1. - done) * self.config('GAMMA') * self.target_q_input + self.reward_input
self.td_error = self.predict_q_value - self.q_value_func.q_tensor
with tf.variable_scope('train'):
self.q_value_func_loss, self.optimizer, self.update_q_value_func_op = self._set_up_loss()
self.update_target_q_value_func_op = self._set_up_target_update()
var_list = get_tf_collection_var_list(key=tf.GraphKeys.GLOBAL_VARIABLES,
scope='{}/train'.format(name)) + self.optimizer.variables()
self.parameters.set_tf_var_list(tf_var_list=sorted(list(set(var_list)), key=lambda x: x.name))
MultiPlaceholderInput.__init__(self,
sub_placeholder_input_list=[dict(obj=self.q_value_func,
attr_name='q_value_func',
),
dict(obj=self.target_q_value_func,
attr_name='target_q_value_func')],
parameters=self.parameters)
@register_counter_info_to_status_decorator(increment=1, info_key='init', under_status='INITED')
def init(self, sess=None, source_obj=None):
super().init()
self.q_value_func.init()
self.target_q_value_func.init(source_obj=self.q_value_func)
self.parameters.init()
if source_obj:
self.copy_from(source_obj)
@record_return_decorator(which_recorder='self')
@register_counter_info_to_status_decorator(increment=1, info_key='train_counter', under_status='TRAIN')
def train(self, batch_data=None, train_iter=None, sess=None, update_target=True) -> dict:
super(DQN, self).train()
self.recorder.record()
if batch_data and not isinstance(batch_data, TransitionData):
raise TypeError()
tf_sess = sess if sess else tf.get_default_session()
train_iter = self.parameters("TRAIN_ITERATION") if not train_iter else train_iter
average_loss = 0.0
for i in range(train_iter):
train_data = self.replay_buffer.sample(
batch_size=self.parameters('BATCH_SIZE')) if batch_data is None else batch_data
_, target_q_val_on_new_s = self.predict_target_with_q_val(obs=train_data.new_state_set,
batch_flag=True)
target_q_val_on_new_s = np.expand_dims(target_q_val_on_new_s, axis=1)
assert target_q_val_on_new_s.shape[0] == train_data.state_set.shape[0]
feed_dict = {
self.reward_input: np.reshape(train_data.reward_set, [-1, 1]),
self.action_input: flatten_n(self.env_spec.action_space, train_data.action_set),
self.state_input: train_data.state_set,
self.done_input: np.reshape(train_data.done_set, [-1, 1]),
self.target_q_input: target_q_val_on_new_s,
**self.parameters.return_tf_parameter_feed_dict()
}
res, _ = tf_sess.run([self.q_value_func_loss, self.update_q_value_func_op],
feed_dict=feed_dict)
average_loss += res
average_loss /= train_iter
if update_target is True and self.get_status()['train_counter'] % self.update_target_q_every_train == 0:
tf_sess.run(self.update_target_q_value_func_op,
feed_dict=self.parameters.return_tf_parameter_feed_dict())
return dict(average_loss=average_loss)
@register_counter_info_to_status_decorator(increment=1, info_key='test_counter', under_status='TEST')
def test(self, *arg, **kwargs):
return super().test(*arg, **kwargs)
@register_counter_info_to_status_decorator(increment=1, info_key='predict_counter')
def predict(self, obs: np.ndarray, sess=None, batch_flag: bool = False):
if batch_flag:
action, q_val = self._predict_batch_action(obs=obs,
q_value_tensor=self.q_value_func.q_tensor,
action_ph=self.action_input,
state_ph=self.state_input,
sess=sess)
else:
action, q_val = self._predict_action(obs=obs,
q_value_tensor=self.q_value_func.q_tensor,
action_ph=self.action_input,
state_ph=self.state_input,
sess=sess)
if not batch_flag:
return int(action)
else:
return action.astype(np.int).tolist()
def predict_target_with_q_val(self, obs: np.ndarray, sess=None, batch_flag: bool = False):
if batch_flag:
action, q_val = self._predict_batch_action(obs=obs,
q_value_tensor=self.target_q_value_func.q_tensor,
action_ph=self.target_q_value_func.action_input,
state_ph=self.target_q_value_func.state_input,
sess=sess)
else:
action, q_val = self._predict_action(obs=obs,
q_value_tensor=self.target_q_value_func.q_tensor,
action_ph=self.target_q_value_func.action_input,
state_ph=self.target_q_value_func.state_input,
sess=sess)
return action, q_val
@register_counter_info_to_status_decorator(increment=1, info_key='append_to_memory')
def append_to_memory(self, samples: TransitionData):
self.replay_buffer.append_batch(obs0=samples.state_set,
obs1=samples.new_state_set,
action=samples.action_set,
reward=samples.reward_set,
terminal1=samples.done_set)
self._status.update_info(info_key='replay_buffer_data_total_count', increment=len(samples))
@record_return_decorator(which_recorder='self')
def save(self, global_step, save_path=None, name=None, **kwargs):
save_path = save_path if save_path else GlobalConfig().DEFAULT_MODEL_CHECKPOINT_PATH
name = name if name else self.name
MultiPlaceholderInput.save(self, save_path=save_path, global_step=global_step, name=name, **kwargs)
return dict(check_point_save_path=save_path, check_point_save_global_step=global_step,
check_point_save_name=name)
@record_return_decorator(which_recorder='self')
def load(self, path_to_model, model_name, global_step=None, **kwargs):
MultiPlaceholderInput.load(self, path_to_model, model_name, global_step, **kwargs)
return dict(check_point_load_path=path_to_model, check_point_load_global_step=global_step,
check_point_load_name=model_name)
def _predict_action(self, obs: np.ndarray, q_value_tensor: tf.Tensor, action_ph: tf.Tensor, state_ph: tf.Tensor,
sess=None):
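        # Discrete-action trick: tile the observation once per action, pair it with the
        # one-hot codes of all actions, evaluate Q in a single batch, then take the argmax.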
if self.env_spec.obs_space.contains(obs) is False:
raise StateOrActionOutOfBoundError("obs {} out of bound {}".format(obs, self.env_spec.obs_space.bound()))
obs = repeat_ndarray(obs, repeats=self.env_spec.flat_action_dim)
tf_sess = sess if sess else tf.get_default_session()
feed_dict = {action_ph: generate_n_actions_hot_code(n=self.env_spec.flat_action_dim),
state_ph: obs, **self.parameters.return_tf_parameter_feed_dict()}
res = tf_sess.run([q_value_tensor],
feed_dict=feed_dict)[0]
return np.argmax(res, axis=0), np.max(res, axis=0)
def _predict_batch_action(self, obs: np.ndarray, q_value_tensor: tf.Tensor, action_ph: tf.Tensor,
state_ph: tf.Tensor, sess=None):
actions = []
q_values = []
for obs_i in obs:
action, q_val = self._predict_action(obs=obs_i,
q_value_tensor=q_value_tensor,
action_ph=action_ph,
state_ph=state_ph,
sess=sess)
actions.append(np.argmax(action, axis=0))
q_values.append(np.max(q_val, axis=0))
return np.array(actions), np.array(q_values)
def _set_up_loss(self):
reg_loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope=self.q_value_func.name_scope)
loss = tf.reduce_sum((self.predict_q_value - self.q_value_func.q_tensor) ** 2)
if len(reg_loss) > 0:
loss += tf.reduce_sum(reg_loss)
optimizer = tf.train.AdamOptimizer(learning_rate=self.parameters('LEARNING_RATE'))
optimize_op = optimizer.minimize(loss=loss, var_list=self.q_value_func.parameters('tf_var_list'))
return loss, optimizer, optimize_op
def _set_up_target_update(self):
op = []
for var, target_var in zip(self.q_value_func.parameters('tf_var_list'),
self.target_q_value_func.parameters('tf_var_list')):
ref_val = self.parameters('DECAY') * target_var + (1.0 - self.parameters('DECAY')) * var
op.append(tf.assign(target_var, ref_val))
return op
def _evaluate_td_error(self, sess=None):
# tf_sess = sess if sess else tf.get_default_session()
# feed_dict = {
# self.reward_input: train_data.reward_set,
# self.action_input: flatten_n(self.env_spec.action_space, train_data.action_set),
# self.state_input: train_data.state_set,
# self.done_input: train_data.done_set,
# self.target_q_input: target_q_val_on_new_s,
# **self.parameters.return_tf_parameter_feed_dict()
# }
# td_loss = tf_sess.run([self.td_error], feed_dict=feed_dict)
pass | 14,350 | 58.057613 | 119 | py |
baconian-project | baconian-project-master/baconian/algo/dyna.py | from baconian.algo.dynamics.dynamics_model import DynamicsModel
from typeguard import typechecked
from baconian.config.dict_config import DictConfig
from baconian.common.sampler.sample_data import TransitionData
from baconian.core.parameters import Parameters
from baconian.config.global_config import GlobalConfig
from baconian.algo.rl_algo import ModelFreeAlgo, ModelBasedAlgo
from baconian.common.misc import *
from baconian.common.logging import record_return_decorator
from baconian.core.status import register_counter_info_to_status_decorator
from baconian.core.util import init_func_arg_record_decorator
from baconian.algo.misc.placeholder_input import PlaceholderInput
import os
class Dyna(ModelBasedAlgo):
"""
    Dyna algorithm, Sutton, R. S. (1991).
    The dynamics model can be replaced with any compatible dynamics model.
"""
required_key_dict = DictConfig.load_json(file_path=GlobalConfig().DEFAULT_ALGO_DYNA_REQUIRED_KEY_LIST)
@init_func_arg_record_decorator()
@typechecked
def __init__(self, env_spec, dynamics_model: DynamicsModel,
model_free_algo: ModelFreeAlgo,
config_or_config_dict: (DictConfig, dict),
name='sample_with_dynamics'
):
super().__init__(env_spec, dynamics_model, name)
config = construct_dict_config(config_or_config_dict, self)
parameters = Parameters(parameters=dict(),
name='dyna_param',
source_config=config)
sub_placeholder_input_list = []
if isinstance(dynamics_model, PlaceholderInput):
sub_placeholder_input_list.append(dict(obj=dynamics_model,
attr_name='dynamics_model'))
if isinstance(model_free_algo, PlaceholderInput):
sub_placeholder_input_list.append(dict(obj=model_free_algo,
attr_name='model_free_algo'))
self.model_free_algo = model_free_algo
self.config = config
self.parameters = parameters
@register_counter_info_to_status_decorator(increment=1, info_key='init', under_status='INITED')
def init(self):
self.parameters.init()
self.model_free_algo.init()
self.dynamics_env.init()
super().init()
@record_return_decorator(which_recorder='self')
@register_counter_info_to_status_decorator(increment=1, info_key='train_counter', under_status='TRAIN')
def train(self, *args, **kwargs) -> dict:
super(Dyna, self).train()
res_dict = {}
batch_data = kwargs['batch_data'] if 'batch_data' in kwargs else None
if 'state' in kwargs:
assert kwargs['state'] in ('state_dynamics_training', 'state_agent_training')
state = kwargs['state']
kwargs.pop('state')
else:
state = None
if not state or state == 'state_dynamics_training':
dynamics_train_res_dict = self._fit_dynamics_model(batch_data=batch_data,
train_iter=self.parameters('dynamics_model_train_iter'))
for key, val in dynamics_train_res_dict.items():
res_dict["{}_{}".format(self._dynamics_model.name, key)] = val
if not state or state == 'state_agent_training':
model_free_algo_train_res_dict = self._train_model_free_algo(batch_data=batch_data,
train_iter=self.parameters(
'model_free_algo_train_iter'))
for key, val in model_free_algo_train_res_dict.items():
res_dict['{}_{}'.format(self.model_free_algo.name, key)] = val
return res_dict
@register_counter_info_to_status_decorator(increment=1, info_key='test_counter', under_status='TEST')
def test(self, *arg, **kwargs):
return super().test(*arg, **kwargs)
@register_counter_info_to_status_decorator(increment=1, info_key='predict_counter')
def predict(self, obs, **kwargs):
return self.model_free_algo.predict(obs)
def append_to_memory(self, *args, **kwargs):
self.model_free_algo.append_to_memory(kwargs['samples'])
@record_return_decorator(which_recorder='self')
def save(self, global_step, save_path=None, name=None, **kwargs):
save_path = save_path if save_path else GlobalConfig().DEFAULT_MODEL_CHECKPOINT_PATH
name = name if name else self.name
self.model_free_algo.save(global_step=global_step,
name=None,
save_path=os.path.join(save_path, self.model_free_algo.name))
self.dynamics_env.save(global_step=global_step,
name=None,
save_path=os.path.join(save_path, self.dynamics_env.name))
return dict(check_point_save_path=save_path, check_point_save_global_step=global_step,
check_point_save_name=name)
@record_return_decorator(which_recorder='self')
def load(self, path_to_model, model_name, global_step=None, **kwargs):
self.model_free_algo.load(path_to_model=os.path.join(path_to_model, self.model_free_algo.name),
model_name=self.model_free_algo.name,
global_step=global_step)
self.dynamics_env.load(global_step=global_step,
path_to_model=os.path.join(path_to_model, self.dynamics_env.name),
model_name=self.dynamics_env.name)
return dict(check_point_load_path=path_to_model, check_point_load_global_step=global_step,
check_point_load_name=model_name)
@register_counter_info_to_status_decorator(increment=1, info_key='dyanmics_train_counter', under_status='TRAIN')
def _fit_dynamics_model(self, batch_data: TransitionData, train_iter, sess=None) -> dict:
res_dict = self._dynamics_model.train(batch_data, **dict(sess=sess,
train_iter=train_iter))
return res_dict
@register_counter_info_to_status_decorator(increment=1, info_key='mode_free_algo_dyanmics_train_counter',
under_status='TRAIN')
def _train_model_free_algo(self, batch_data=None, train_iter=None, sess=None):
res_dict = self.model_free_algo.train(**dict(batch_data=batch_data,
train_iter=train_iter,
sess=sess))
return res_dict
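# A minimal, hypothetical sketch (not part of the library) of the two Dyna training phases.
# It assumes an initialised `dyna` instance and `real_batch`, a TransitionData of samples
# collected from the real environment.
def _example_dyna_step(dyna, real_batch):
    # phase 1: fit the learned dynamics model on real experience
    dynamics_res = dyna.train(batch_data=real_batch, state='state_dynamics_training')
    # phase 2: improve the model-free learner (here on the same batch; imagined samples
    # generated with dyna.dynamics_env could be passed instead)
    agent_res = dyna.train(batch_data=real_batch, state='state_agent_training')
    return {**dynamics_res, **agent_res}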
| 6,767 | 51.465116 | 119 | py |
baconian-project | baconian-project-master/baconian/algo/rl_algo.py | from baconian.algo.algo import Algo
from baconian.algo.dynamics.dynamics_model import DynamicsModel
from baconian.core.core import EnvSpec
from baconian.common.logging import record_return_decorator
import numpy as np
class ModelFreeAlgo(Algo):
def __init__(self, env_spec: EnvSpec, name: str = 'model_free_algo', warm_up_trajectories_number=0):
super(ModelFreeAlgo, self).__init__(env_spec, name, warm_up_trajectories_number)
class OnPolicyAlgo(Algo):
pass
class OffPolicyAlgo(Algo):
pass
class ValueBasedAlgo(Algo):
pass
class PolicyBasedAlgo(Algo):
pass
class ModelBasedAlgo(Algo):
def __init__(self, env_spec, dynamics_model: DynamicsModel, name: str = 'model_based_algo'):
super(ModelBasedAlgo, self).__init__(env_spec, name)
self._dynamics_model = dynamics_model
self.dynamics_env = self._dynamics_model.return_as_env()
def train_dynamics(self, *args, **kwargs):
pass
@record_return_decorator(which_recorder='self')
def test_dynamics(self, env, sample_count, *args, **kwargs):
self.set_status('TEST')
env.set_status('TEST')
st = env.reset()
real_state_list = []
dyanmics_state_list = []
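        # the dynamics model is re-anchored to the real state before every step, so the
        # L1/L2 errors below measure one-step prediction error, not compounding rollout error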
for i in range(sample_count):
ac = self.env_spec.action_space.sample()
self._dynamics_model.reset_state(state=st)
new_state_dynamics, _, _, _ = self.dynamics_env.step(action=ac, )
new_state_real, _, done, _ = env.step(action=ac)
real_state_list.append(new_state_real)
dyanmics_state_list.append(new_state_dynamics)
st = new_state_real
if done is True:
env.reset()
l1_loss = np.linalg.norm(np.array(real_state_list) - np.array(dyanmics_state_list), ord=1)
l2_loss = np.linalg.norm(np.array(real_state_list) - np.array(dyanmics_state_list), ord=2)
return dict(dynamics_test_l1_error=l1_loss, dynamics_test_l2_error=l2_loss)
def set_terminal_reward_function_for_dynamics_env(self, terminal_func, reward_func):
self.dynamics_env.set_terminal_reward_func(terminal_func, reward_func)
| 2,158 | 34.393443 | 104 | py |
baconian-project | baconian-project-master/baconian/algo/__init__.py | 0 | 0 | 0 | py |
|
baconian-project | baconian-project-master/baconian/algo/gps.py | """
Guided Policy Search
"""
| 29 | 6.5 | 20 | py |
baconian-project | baconian-project-master/baconian/algo/mpc.py | from baconian.algo.rl_algo import ModelBasedAlgo
from baconian.algo.dynamics.dynamics_model import DynamicsModel
from baconian.config.dict_config import DictConfig
from baconian.common.sampler.sample_data import TrajectoryData
from baconian.core.parameters import Parameters
from baconian.config.global_config import GlobalConfig
from baconian.common.misc import *
from baconian.algo.policy.policy import Policy
from baconian.common.logging import ConsoleLogger
from baconian.common.sampler.sample_data import TransitionData
from baconian.common.logging import record_return_decorator
class ModelPredictiveControl(ModelBasedAlgo):
required_key_dict = DictConfig.load_json(file_path=GlobalConfig().DEFAULT_MPC_REQUIRED_KEY_LIST)
def __init__(self, env_spec, dynamics_model: DynamicsModel,
config_or_config_dict: (DictConfig, dict),
policy: Policy,
name='mpc',
):
super().__init__(env_spec, dynamics_model, name)
self.config = construct_dict_config(config_or_config_dict, self)
self.policy = policy
self.parameters = Parameters(parameters=dict(),
source_config=self.config,
name=name + '_' + 'mpc_param')
self.memory = TransitionData(env_spec=env_spec)
def init(self, source_obj=None):
super().init()
self.parameters.init()
self._dynamics_model.init()
self.policy.init()
if source_obj:
self.copy_from(source_obj)
def train(self, *arg, **kwargs) -> dict:
super(ModelPredictiveControl, self).train()
res_dict = {}
batch_data = kwargs['batch_data'] if 'batch_data' in kwargs else self.memory
dynamics_train_res_dict = self._fit_dynamics_model(batch_data=batch_data,
train_iter=self.parameters('dynamics_model_train_iter'))
for key, val in dynamics_train_res_dict.items():
res_dict["mlp_dynamics_{}".format(key)] = val
return res_dict
def test(self, *arg, **kwargs) -> dict:
return super().test(*arg, **kwargs)
def _fit_dynamics_model(self, batch_data: TransitionData, train_iter, sess=None) -> dict:
res_dict = self._dynamics_model.train(batch_data, **dict(sess=sess,
train_iter=train_iter))
return res_dict
def predict(self, obs, **kwargs):
if self.is_training is True:
return self.env_spec.action_space.sample()
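        # Random-shooting MPC: simulate SAMPLED_PATH_NUM candidate action sequences of
        # length SAMPLED_HORIZON through the learned dynamics, rank them by cumulative
        # reward, and execute only the first action of the best sequence.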
rollout = TrajectoryData(env_spec=self.env_spec)
        for i in range(self.parameters('SAMPLED_PATH_NUM')):
            # every candidate path is simulated from the current observation
            state = obs
            path = TransitionData(env_spec=self.env_spec)
            # todo: terminal_func signal problem to be considered?
for _ in range(self.parameters('SAMPLED_HORIZON')):
ac = self.policy.forward(obs=state)
new_state, re, done, _ = self.dynamics_env.step(action=ac, state=state)
path.append(state=state, action=ac, new_state=new_state, reward=re, done=done)
state = new_state
rollout.append(path)
rollout.trajectories.sort(key=lambda x: x.cumulative_reward, reverse=True)
ac = rollout.trajectories[0].action_set[0]
assert self.env_spec.action_space.contains(ac)
return ac
def append_to_memory(self, samples: TransitionData):
self.memory.union(samples)
def copy_from(self, obj) -> bool:
if not isinstance(obj, type(self)):
raise TypeError('Wrong type of obj %s to be copied, which should be %s' % (type(obj), type(self)))
self.parameters.copy_from(obj.parameters)
self._dynamics_model.copy_from(obj._dynamics_model)
ConsoleLogger().print('info', 'model: {} copied from {}'.format(self, obj))
return True
@record_return_decorator(which_recorder='self')
def save(self, global_step, save_path=None, name=None, **kwargs):
save_path = save_path if save_path else GlobalConfig().DEFAULT_MODEL_CHECKPOINT_PATH
name = name if name else self.name
self._dynamics_model.save(save_path=save_path, global_step=global_step, name=name, **kwargs)
self.policy.save(save_path=save_path, global_step=global_step, name=name, **kwargs)
return dict(check_point_save_path=save_path, check_point_save_global_step=global_step,
check_point_save_name=name)
@record_return_decorator(which_recorder='self')
def load(self, path_to_model, model_name, global_step=None, **kwargs):
self._dynamics_model.load(path_to_model, model_name, global_step, **kwargs)
self.policy.load(path_to_model, model_name, global_step, **kwargs)
return dict(check_point_load_path=path_to_model, check_point_load_global_step=global_step,
check_point_load_name=model_name)
| 4,978 | 47.339806 | 115 | py |
baconian-project | baconian-project-master/baconian/algo/policy/ilqr_policy.py | # import numpy as np
from scipy.linalg import inv
from scipy.optimize import approx_fprime
from baconian.algo.policy.policy import DeterministicPolicy
from baconian.core.parameters import Parameters
from baconian.core.core import EnvSpec
import autograd.numpy as np
from baconian.common.special import *
from baconian.algo.dynamics.reward_func.reward_func import CostFunc
from baconian.algo.rl_algo import ModelBasedAlgo
from baconian.common.logging import record_return_decorator
from baconian.core.status import register_counter_info_to_status_decorator
from baconian.common.sampler.sample_data import TransitionData
from baconian.algo.dynamics.dynamics_model import DynamicsEnvWrapper
"""
The gradients are approximated via finite differences rather than computed analytically.
"""
class iLQR(object):
def __init__(self, env_spec: EnvSpec, delta, T, dyn_model: DynamicsEnvWrapper, cost_fn: CostFunc):
self.env_spec = env_spec
self.min_factor = 2
self.factor = self.min_factor
self.min_mu = 1e-6
self.mu = self.min_mu
self.delta = delta
self.T = T
self.dyn_model = dyn_model
self.cost_fn = cost_fn
self.control_low = self.env_spec.action_space.low
self.control_high = self.env_spec.action_space.high
self.K, self.k, self.std = None, None, None
def increase(self, mu):
self.factor = np.maximum(self.factor, self.factor * self.min_factor)
self.mu = np.maximum(self.min_mu, self.mu * self.factor)
def decrease(self, mu):
self.factor = np.minimum(1 / self.min_factor, self.factor / self.min_factor)
if self.mu * self.factor > self.min_mu:
self.mu = self.mu * self.factor
else:
self.mu = 0
def simulate_step(self, x):
xu = [x[:self.env_spec.obs_space.shape[0]], x[self.env_spec.obs_space.shape[0]:]]
next_x = self.dyn_model.step(state=xu[0], action=xu[1], allow_clip=True)
# next_x = self.env_spec.obs_space.sample()
"get cost"
cost = self.cost_fn(state=xu[0], action=xu[1], new_state=next_x)
return next_x, cost
def simulate_next_state(self, x, i):
return self.simulate_step(x)[0][i]
def simulate_cost(self, x):
return self.simulate_step(x)[1]
def approx_fdoubleprime(self, x, i):
return approx_fprime(x, self.simulate_cost, self.delta)[i]
def finite_difference(self, x, u):
"calling finite difference for delta perturbation"
xu = np.concatenate((x, u))
F = np.zeros((x.shape[0], xu.shape[0]))
for i in range(x.shape[0]):
F[i, :] = approx_fprime(xu, self.simulate_next_state, self.delta, i)
c = approx_fprime(xu, self.simulate_cost, self.delta)
C = np.zeros((len(xu), len(xu)))
for i in range(xu.shape[0]):
C[i, :] = approx_fprime(xu, self.approx_fdoubleprime, self.delta, i)
f = np.zeros((len(x)))
return C, F, c, f
def differentiate(self, x_seq, u_seq):
"get gradient values using finite difference"
C, F, c, f = [], [], [], []
for t in range(self.T - 1):
Ct, Ft, ct, ft = self.finite_difference(x_seq[t], u_seq[t])
C.append(Ct)
F.append(Ft)
c.append(ct)
f.append(ft)
"TODO : C, F, c, f for time step T are different. Why ?"
u = np.zeros((u_seq[0].shape))
Ct, Ft, ct, ft = self.finite_difference(x_seq[-1], u)
C.append(Ct)
F.append(Ft)
c.append(ct)
f.append(ft)
return C, F, c, f
def backward(self, x_seq, u_seq):
"initialize F_t, C_t, f_t, c_t, V_t, v_t"
C, F, c, f = self.differentiate(x_seq, u_seq)
n = x_seq[0].shape[0]
"initialize V_t1 and v_t1"
c_x = c[-1][:n]
c_u = c[-1][n:]
C_xx = C[-1][:n, :n]
C_xu = C[-1][:n, n:]
C_ux = C[-1][n:, :n]
C_uu = C[-1][n:, n:]
# C_uu = C_uu + self.mu * np.eye(C_uu.shape[0])
K = np.zeros((self.T + 1, u_seq[0].shape[0], x_seq[0].shape[0]))
k = np.zeros((self.T + 1, u_seq[0].shape[0]))
V = np.zeros((self.T + 1, x_seq[0].shape[0], x_seq[0].shape[0]))
v = np.zeros((self.T + 1, x_seq[0].shape[0]))
# K[-1] = -np.dot(inv(C_uu), C_ux)
# k[-1] = -np.dot(inv(C_uu), c_u)
# V[-1] = C_xx + np.dot(C_xu, K[-1]) + np.dot(K[-1].T, C_ux) + np.dot(np.dot(K[-1].T, C_uu), K[-1])
# v[-1] = c_x + np.dot(C_xu, k[-1]) + np.dot(K[-1].T, c_u) + np.dot(np.dot(K[-1].T, C_uu), k[-1])
V[-1] = C_xx
v[-1] = c_x
"initialize Q_t1 and q_t1"
Q = list(np.zeros((self.T)))
q = list(np.zeros((self.T)))
self.std = []
"loop till horizon"
t = self.T - 1
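        # backward recursion: Q_t = C_t + F_t^T V_{t+1} F_t, q_t = c_t + F_t^T v_{t+1} + F_t^T V_{t+1} f_t,
        # then K_t = -Q_uu^{-1} Q_ux, k_t = -Q_uu^{-1} q_u, and V_t, v_t follow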
while t >= 0:
# for t in range(self.T-1, -1, -1):
"update Q"
# todo remove mu here
# Q[t] = C[t] + np.dot(np.dot(F[t].T, V[t + 1] + self.mu * np.eye(V[t + 1].shape[0])),
# F[t]) # + 0.01 * np.eye(V[t+1].shape[0])),
Q[t] = C[t] + np.dot(np.dot(F[t].T, V[t + 1]), F[t])
q[t] = c[t] + np.dot(F[t].T, v[t + 1]) + np.dot(np.dot(F[t].T, V[t + 1]), f[t])
"differentiate Q to get Q_uu, Q_xx, Q_ux, Q_u, Q_x"
q_x = q[t][:n]
q_u = q[t][n:]
Q_xx = Q[t][:n, :n]
Q_xu = Q[t][:n, n:]
Q_ux = Q[t][n:, :n]
Q_uu = Q[t][n:, n:]
# Q_uu = Q_uu + 100 * np.eye(Q_uu.shape[0])
# try:
# np.linalg.cholesky(Q_uu)
# except:
# print(self.mu)
# self.increase(self.mu)
# continue
"update K, k, V, v"
# print("q_uu", Q_uu)
K[t] = -np.dot(inv(Q_uu), Q_ux)
k[t] = -np.dot(inv(Q_uu), q_u)
V[t] = Q_xx + np.dot(Q_xu, K[t]) + np.dot(K[t].T, Q_ux) + np.dot(np.dot(K[t].T, Q_uu), K[t])
v[t] = q_x + np.dot(Q_xu, k[t]) + np.dot(K[t].T, q_u) + np.dot(np.dot(K[t].T, Q_uu), k[t])
self.std.append(inv(Q_uu))
# self.decrease(self.mu)
t -= 1
self.K = K
self.k = k
def get_action_one_step(self, state, t, x, u):
"TODO : Add delta U's to given action array"
# print("Q_uu ", self.std[t])
mean = np.dot(self.K[t], (state - x)) + self.k[t] + u
# return np.clip(mean, self.control_low, self.control_high)
# todo remove random here
if np.isclose(0.0, self.std[t]).all() is False:
return np.clip(np.random.normal(mean, self.std[t]), self.control_low, self.control_high)
else:
return np.clip(mean, self.control_low, self.control_high)
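# The backward pass above follows the standard finite-horizon LQR/iLQR
# recursion, with F_t, f_t obtained from the learned dynamics model and
# C_t, c_t from the cost function via finite differences.  In the notation
# used by the code (x-block first, u-block second):
#
#     Q_t = C_t + F_t^T V_{t+1} F_t
#     q_t = c_t + F_t^T v_{t+1} + F_t^T V_{t+1} f_t
#     K_t = -Q_uu^{-1} Q_ux,    k_t = -Q_uu^{-1} q_u
#     V_t = Q_xx + Q_xu K_t + K_t^T Q_ux + K_t^T Q_uu K_t
#     v_t = q_x + Q_xu k_t + K_t^T q_u + K_t^T Q_uu k_t
#
# and get_action_one_step applies u_t = u_hat_t + k_t + K_t (x_t - x_hat_t)
# around the nominal trajectory (x_hat, u_hat).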
class iLQRPolicy(DeterministicPolicy):
@typechecked
def __init__(self, env_spec: EnvSpec, T: int, delta: float, iteration: int, cost_fn: CostFunc,
dynamics_model_train_iter: int,
dynamics: DynamicsEnvWrapper):
param = Parameters(parameters=dict(T=T, delta=delta,
iteration=iteration,
dynamics_model_train_iter=dynamics_model_train_iter))
super().__init__(env_spec, param)
self.dynamics = dynamics
self.U_hat = None
self.X_hat = None
self.iLqr_instance = iLQR(env_spec=env_spec,
delta=self.parameters('delta'),
T=self.parameters('T'),
dyn_model=dynamics._dynamics,
cost_fn=cost_fn)
def forward(self, obs, **kwargs):
obs = make_batch(obs, original_shape=self.env_spec.obs_shape).tolist()
action = []
if 'step' in kwargs:
step = kwargs['step']
else:
step = None
for obs_i in obs:
action_i = self._forward(obs_i, step=step)
action.append(action_i)
return np.array(action)
def copy_from(self, obj) -> bool:
super().copy_from(obj)
self.parameters.copy_from(obj.parameters)
return True
def make_copy(self, *args, **kwargs):
dynamics = DynamicsEnvWrapper(dynamics=self.dynamics._dynamics)
dynamics.set_terminal_reward_func(terminal_func=self.dynamics._terminal_func,
reward_func=self.dynamics._reward_func)
return iLQRPolicy(env_spec=self.env_spec,
T=self.parameters('T'),
delta=self.parameters('delta'),
iteration=self.parameters('iteration'),
cost_fn=self.iLqr_instance.cost_fn,
dynamics_model_train_iter=self.parameters('dynamics_model_train_iter'),
dynamics=dynamics)
def init(self, source_obj=None):
self.parameters.init()
if source_obj:
self.copy_from(obj=source_obj)
def get_status(self):
return super().get_status()
    def _forward(self, obs, step=None):
if not step:
self.U_hat = np.reshape([np.zeros(self.action_space.sample().shape) for _ in range(self.T - 1)],
(self.T - 1, self.action_space.shape[0]))
self.X_hat = []
self.X_hat.append(obs)
x = obs
for i in range(self.T - 1):
next_obs, _, _, _ = self.dynamics.step(action=self.U_hat[i, :], state=x, allow_clip=True)
self.X_hat.append(next_obs)
x = next_obs
self.X_hat = np.array(self.X_hat)
for i in range(self.parameters('iteration')):
self.iLqr_instance.backward(self.X_hat, self.U_hat)
x = obs
U = np.zeros(self.U_hat.shape)
X = np.zeros(self.X_hat.shape)
for t in range(self.T - 1):
u = self.iLqr_instance.get_action_one_step(x, t, self.X_hat[t], self.U_hat[t])
X[t] = x
U[t] = u
x, _, _, _ = self.dynamics.step(state=x, action=u, allow_clip=True)
X[-1] = x
self.X_hat = X
self.U_hat = U
return self.U_hat[0]
@property
def T(self):
return self.parameters('T')
class iLQRAlogWrapper(ModelBasedAlgo):
def __init__(self, policy, env_spec, dynamics_env: DynamicsEnvWrapper, name: str = 'model_based_algo'):
self.policy = policy
super().__init__(env_spec, dynamics_env._dynamics, name)
self.dynamics_env = dynamics_env
def predict(self, obs, **kwargs):
if self.is_training is True:
return self.env_spec.action_space.sample()
else:
return self.policy.forward(obs=obs)
def append_to_memory(self, *args, **kwargs):
pass
def init(self):
self.policy.init()
self.dynamics_env.init()
super().init()
@record_return_decorator(which_recorder='self')
@register_counter_info_to_status_decorator(increment=1, info_key='train_counter', under_status='TRAIN')
def train(self, *args, **kwargs) -> dict:
super(iLQRAlogWrapper, self).train()
res_dict = {}
batch_data = kwargs['batch_data'] if 'batch_data' in kwargs else None
if 'state' in kwargs:
assert kwargs['state'] in ('state_dynamics_training', 'state_agent_training')
state = kwargs['state']
kwargs.pop('state')
else:
state = None
if not state or state == 'state_dynamics_training':
dynamics_train_res_dict = self._fit_dynamics_model(batch_data=batch_data,
train_iter=self.policy.parameters(
'dynamics_model_train_iter'))
for key, val in dynamics_train_res_dict.items():
res_dict["{}_{}".format(self._dynamics_model.name, key)] = val
return res_dict
@register_counter_info_to_status_decorator(increment=1, info_key='dyanmics_train_counter', under_status='TRAIN')
def _fit_dynamics_model(self, batch_data: TransitionData, train_iter, sess=None) -> dict:
res_dict = self._dynamics_model.train(batch_data, **dict(sess=sess,
train_iter=train_iter))
return res_dict
| 12,553 | 34.165266 | 116 | py |
baconian-project | baconian-project-master/baconian/algo/policy/normal_distribution_mlp.py | from baconian.algo.policy.policy import StochasticPolicy
from baconian.core.core import EnvSpec
import overrides
import tensorflow as tf
from baconian.tf.mlp import MLP
from baconian.tf.tf_parameters import ParametersWithTensorflowVariable
from baconian.common.special import *
from baconian.tf.util import *
from baconian.algo.utils import _get_copy_arg_with_tf_reuse
from baconian.algo.misc.placeholder_input import PlaceholderInput
import baconian.algo.distribution.mvn as mvn
"""
logvar and logvar_speed are adapted from https://github.com/pat-coady/trpo
"""
class NormalDistributionMLPPolicy(StochasticPolicy, PlaceholderInput):
def __init__(self, env_spec: EnvSpec,
name: str,
name_scope: str,
mlp_config: list,
input_norm: np.ndarray = None,
output_norm: np.ndarray = None,
output_low: np.ndarray = None,
output_high: np.ndarray = None,
reuse=False,
distribution_tensors_tuple: tuple = None
):
StochasticPolicy.__init__(self, env_spec=env_spec, name=name, parameters=None)
obs_dim = env_spec.flat_obs_dim
action_dim = env_spec.flat_action_dim
assert action_dim == mlp_config[-1]['N_UNITS']
self.mlp_config = mlp_config
self.input_norm = input_norm
self.output_norm = output_norm
self.output_low = output_low
self.output_high = output_high
self.name_scope = name_scope
mlp_kwargs = dict(
reuse=reuse,
input_norm=input_norm,
output_norm=output_norm,
output_low=output_low,
output_high=output_high,
mlp_config=mlp_config,
name_scope=name_scope
)
ph_inputs = []
if distribution_tensors_tuple is not None:
self.mean_output = distribution_tensors_tuple[0][0]
self.logvar_output = distribution_tensors_tuple[1][0]
assert list(self.mean_output.shape)[-1] == action_dim
assert list(self.logvar_output.shape)[-1] == action_dim
self.mlp_net = None
else:
with tf.variable_scope(name_scope):
self.state_input = tf.placeholder(shape=[None, obs_dim], dtype=tf.float32, name='state_ph')
ph_inputs.append(self.state_input)
self.mlp_net = MLP(input_ph=self.state_input,
net_name='normal_distribution_mlp_policy',
**mlp_kwargs)
self.mean_output = self.mlp_net.output
with tf.variable_scope(name_scope, reuse=reuse):
with tf.variable_scope('norm_dist', reuse=reuse):
logvar_speed = (10 * self.mlp_config[-2]['N_UNITS']) // 48
logvar_output = tf.get_variable(name='normal_distribution_variance',
shape=[logvar_speed, self.mlp_config[-1]['N_UNITS']],
dtype=tf.float32)
self.logvar_output = tf.reduce_sum(logvar_output, axis=0)
with tf.variable_scope(name_scope, reuse=reuse):
self.action_input = tf.placeholder(shape=[None, action_dim], dtype=tf.float32, name='action_ph')
ph_inputs.append(self.action_input)
with tf.variable_scope('norm_dist', reuse=reuse):
self.stddev_output = tf.exp(self.logvar_output / 2.0, name='std_dev')
self.var_output = tf.exp(self.logvar_output, name='variance')
self.action_output = mvn.sample(mean_p=self.mean_output,
var_p=self.var_output,
dims=self.action_space.flat_dim)
var_list = get_tf_collection_var_list(scope='{}/norm_dist'.format(name_scope))
if self.mlp_net:
var_list += self.mlp_net.var_list
self.parameters = ParametersWithTensorflowVariable(tf_var_list=sorted(list(set(var_list)),
key=lambda x: x.name),
rest_parameters=dict(
state_input=self.state_input,
action_input=self.action_input,
**mlp_kwargs
),
name='normal_distribution_mlp_tf_param')
PlaceholderInput.__init__(self, parameters=self.parameters)
self._log_prob = None
self._prob = None
self._entropy = None
@overrides.overrides
def forward(self, obs: (np.ndarray, list), sess=None, feed_dict=None, **kwargs):
obs = make_batch(obs, original_shape=self.env_spec.obs_shape)
feed_dict = feed_dict if feed_dict is not None else dict()
feed_dict = {
**feed_dict,
self.state_input: obs,
**self.parameters.return_tf_parameter_feed_dict()
}
sess = sess if sess else tf.get_default_session()
res = sess.run(self.action_output, feed_dict=feed_dict)
res = np.clip(res, a_min=self.env_spec.action_space.low, a_max=self.env_spec.action_space.high)
return res
@overrides.overrides
def copy_from(self, obj) -> bool:
return PlaceholderInput.copy_from(self, obj)
def make_copy(self, **kwargs):
kwargs = _get_copy_arg_with_tf_reuse(obj=self, kwargs=kwargs)
copy_mlp_policy = NormalDistributionMLPPolicy(env_spec=self.env_spec,
input_norm=self.input_norm,
output_norm=self.output_norm,
output_low=self.output_low,
output_high=self.output_high,
mlp_config=self.mlp_config,
**kwargs)
return copy_mlp_policy
def compute_dist_info(self, name, sess=None, **kwargs) -> np.ndarray:
        # todo: optimize this; every call of this function adds new computation tensors to the graph
dist_info_map = {
'log_prob': self.log_prob,
'prob': self.prob,
'entropy': self.entropy,
'kl': self.kl,
}
if name not in dist_info_map.keys():
raise ValueError("only support compute {} info".format(list(dist_info_map.keys())))
sess = sess if sess else tf.get_default_session()
if name in ['log_prob', 'prob']:
if 'value' not in kwargs:
raise ValueError('To compute {}, pass the parameter with key {} and type {} in'.format(name, 'value',
np.ndarray.__name__))
assert isinstance(kwargs['value'], np.ndarray)
if name == 'kl':
if 'other' not in kwargs:
raise ValueError('To compute {}, pass the parameter with key {} and type {} in'.format(name, 'other',
type(
self).__name__))
assert isinstance(kwargs['other'], type(self))
if 'feed_dict' in kwargs:
feed_dict = kwargs['feed_dict'] if 'feed_dict' in kwargs else None
kwargs.pop('feed_dict')
else:
feed_dict = None
return sess.run(dist_info_map[name](**kwargs), feed_dict=feed_dict)
def kl(self, other, *args, **kwargs) -> tf.Tensor:
if not isinstance(other, type(self)):
raise TypeError()
return mvn.kl(mean_p=self.mean_output, var_p=self.var_output,
mean_q=other.mean_output, var_q=other.var_output, dims=self.action_space.flat_dim)
def log_prob(self, *args, **kwargs) -> tf.Tensor:
if self._log_prob is None:
self._log_prob = mvn.log_prob(variable_ph=self.action_input, mean_p=self.mean_output, var_p=self.var_output)
return self._log_prob
def prob(self, *args, **kwargs) -> tf.Tensor:
if self._prob is None:
self._prob = mvn.prob(variable_ph=self.action_input, mean_p=self.mean_output, var_p=self.var_output)
return self._prob
def entropy(self, *args, **kwargs) -> tf.Tensor:
if self._entropy is None:
self._entropy = mvn.entropy(self.mean_output, self.var_output, dims=self.action_space.flat_dim)
return self._entropy
def get_dist_info(self) -> tuple:
res = (
dict(shape=tuple(self.mean_output.shape.as_list()),
name='mean_output',
obj=self.mean_output,
dtype=self.mean_output.dtype),
dict(shape=tuple(self.logvar_output.shape.as_list()),
name='logvar_output',
obj=self.logvar_output,
dtype=self.logvar_output.dtype),
)
for re in res:
attr = getattr(self, re['name'])
if id(attr) != id(re['obj']):
raise ValueError('key name {} should be same as the obj {} name'.format(re['name'], re['obj']))
return res
def save(self, *args, **kwargs):
return PlaceholderInput.save(self, *args, **kwargs)
def load(self, *args, **kwargs):
return PlaceholderInput.load(self, *args, **kwargs)
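# Note on the logvar_speed parameterisation used in __init__ (adapted from the
# pat-coady/trpo implementation referenced above): the log-variance is stored
# as a [logvar_speed, action_dim] variable and summed over its first axis.
# Since every row receives the same gradient, this roughly multiplies the
# effective learning rate of the log-variance by logvar_speed relative to the
# mean network, which in practice speeds up adaptation of the policy variance.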
if __name__ == '__main__':
from baconian.test.tests.test_rl.test_policy.test_mlp_norm_policy import TestNormalDistMLPPolicy
import unittest
unittest.TestLoader().loadTestsFromTestCase(TestNormalDistMLPPolicy)
unittest.main()
| 10,149 | 47.104265 | 124 | py |
baconian-project | baconian-project-master/baconian/algo/policy/policy.py | from baconian.core.core import Basic, EnvSpec
import typeguard as tg
from baconian.core.parameters import Parameters
import abc
class Policy(Basic):
@tg.typechecked
def __init__(self, env_spec: EnvSpec, parameters: Parameters = None, name='policy'):
super().__init__(name=name)
self.env_spec = env_spec
self.parameters = parameters
@property
def obs_space(self):
return self.env_spec.obs_space
@property
def action_space(self):
return self.env_spec.action_space
@abc.abstractmethod
def forward(self, *args, **kwargs):
raise NotImplementedError
@tg.typechecked
def copy_from(self, obj) -> bool:
if not isinstance(obj, type(self)):
raise TypeError('Wrong type of obj %s to be copied, which should be %s' % (type(obj), type(self)))
return True
def make_copy(self, *args, **kwargs):
raise NotImplementedError
def init(self, source_obj=None):
if self.parameters:
self.parameters.init()
if source_obj:
self.copy_from(obj=source_obj)
class StochasticPolicy(Policy):
@tg.typechecked
def __init__(self, env_spec: EnvSpec, parameters: Parameters = None, name: str = 'stochastic_policy'):
super(StochasticPolicy, self).__init__(env_spec=env_spec, parameters=parameters, name=name)
self.state_input = None
self.action_output = None
def log_prob(self, *args, **kwargs):
pass
def prob(self, *args, **kwargs):
pass
    def kl(self, other, *args, **kwargs):
pass
def entropy(self, *args, **kwargs):
pass
def get_dist_info(self) -> tuple:
pass
class DeterministicPolicy(Policy):
pass
| 1,741 | 24.617647 | 110 | py |
baconian-project | baconian-project-master/baconian/algo/policy/__init__.py | from .policy import Policy, StochasticPolicy, DeterministicPolicy
from .constant_action_policy import ConstantActionPolicy
from .random_policy import UniformRandomPolicy
from .normal_distribution_mlp import NormalDistributionMLPPolicy
from .lqr_policy import LQRPolicy
from .ilqr_policy import iLQRPolicy
from .deterministic_mlp import DeterministicMLPPolicy
| 359 | 44 | 65 | py |
baconian-project | baconian-project-master/baconian/algo/policy/random_policy.py | from baconian.core.core import EnvSpec
from baconian.algo.policy.policy import Policy
from typeguard import typechecked
from overrides.overrides import overrides
import numpy as np
class UniformRandomPolicy(Policy):
@typechecked
def __init__(self, env_spec: EnvSpec, name: str = 'random_policy'):
super().__init__(env_spec=env_spec, name=name)
def forward(self, obs, **kwargs):
return np.array(self.action_space.sample())
def save(self, global_step, save_path=None, name=None, **kwargs):
pass
def load(self, path_to_model, model_name, global_step=None, **kwargs):
pass
| 627 | 27.545455 | 74 | py |
baconian-project | baconian-project-master/baconian/algo/policy/lqr_policy.py | # import numpy as np
from scipy.linalg import inv
from baconian.algo.policy.policy import DeterministicPolicy
from baconian.core.parameters import Parameters
from baconian.core.core import EnvSpec
from baconian.algo.dynamics.linear_dynamics_model import LinearDynamicsModel
import autograd.numpy as np
from baconian.common.special import *
from baconian.algo.dynamics.reward_func.reward_func import CostFunc, QuadraticCostFunc
class LQR(object):
def __init__(self, env_spec: EnvSpec, T, dyna_model: LinearDynamicsModel, cost_fn: QuadraticCostFunc):
self.env_spec = env_spec
self.T = T
self.dyn_model = dyna_model
self.cost_fn = cost_fn
self.control_low = self.env_spec.action_space.low
self.control_high = self.env_spec.action_space.high
self.K, self.k, self.std = None, None, None
self.C = np.repeat(np.expand_dims(self.cost_fn.C, axis=0), [self.T], axis=0)
self.F = np.repeat(np.expand_dims(self.dyn_model.F, axis=0), [self.T], axis=0)
self.c = np.repeat(np.expand_dims(self.cost_fn.c, axis=0), [self.T], axis=0)
self.f = np.repeat(np.expand_dims(self.dyn_model.f, axis=0), [self.T], axis=0)
def differentiate(self):
"get gradient values using finite difference"
C = np.repeat(np.expand_dims(self.cost_fn.C, axis=0), [self.T, 1, 1])
F = np.repeat(np.expand_dims(self.dyn_model.F, axis=0), [self.T, 1, 1])
c = np.repeat(np.expand_dims(self.cost_fn.c, axis=0), (self.T, 1))
f = np.repeat(np.expand_dims(self.dyn_model.f, axis=0), (self.T, 1))
return C, F, c, f
def backward(self, x_seq, u_seq):
"initialize F_t, C_t, f_t, c_t, V_t, v_t"
# todo C, F, c, f can be analytical set here
C, F, c, f = self.C, self.F, self.c, self.f
n = x_seq[0].shape[0]
"initialize V_t1 and v_t1"
c_x = c[-1][:n]
c_u = c[-1][n:]
C_xx = C[-1][:n, :n]
C_xu = C[-1][:n, n:]
C_ux = C[-1][n:, :n]
C_uu = C[-1][n:, n:]
# C_uu = C_uu + self.mu * np.eye(C_uu.shape[0])
K = np.zeros((self.T + 1, u_seq[0].shape[0], x_seq[0].shape[0]))
k = np.zeros((self.T + 1, u_seq[0].shape[0]))
V = np.zeros((self.T + 1, x_seq[0].shape[0], x_seq[0].shape[0]))
v = np.zeros((self.T + 1, x_seq[0].shape[0]))
# K[-1] = -np.dot(inv(C_uu), C_ux)
# k[-1] = -np.dot(inv(C_uu), c_u)
# V[-1] = C_xx + np.dot(C_xu, K[-1]) + np.dot(K[-1].T, C_ux) + np.dot(np.dot(K[-1].T, C_uu), K[-1])
# v[-1] = c_x + np.dot(C_xu, k[-1]) + np.dot(K[-1].T, c_u) + np.dot(np.dot(K[-1].T, C_uu), k[-1])
V[-1] = C_xx
v[-1] = c_x
"initialize Q_t1 and q_t1"
Q = list(np.zeros((self.T)))
q = list(np.zeros((self.T)))
self.std = []
"loop till horizon"
t = self.T - 1
while t >= 0:
Q[t] = C[t] + np.dot(np.dot(F[t].T, V[t + 1]), F[t])
q[t] = c[t] + np.dot(F[t].T, v[t + 1]) + np.dot(np.dot(F[t].T, V[t + 1]), f[t])
"differentiate Q to get Q_uu, Q_xx, Q_ux, Q_u, Q_x"
q_x = q[t][:n]
q_u = q[t][n:]
Q_xx = Q[t][:n, :n]
Q_xu = Q[t][:n, n:]
Q_ux = Q[t][n:, :n]
Q_uu = Q[t][n:, n:]
"update K, k, V, v"
# print("q_uu", Q_uu)
K[t] = -np.dot(inv(Q_uu), Q_ux)
k[t] = -np.dot(inv(Q_uu), q_u)
V[t] = Q_xx + np.dot(Q_xu, K[t]) + np.dot(K[t].T, Q_ux) + np.dot(np.dot(K[t].T, Q_uu), K[t])
v[t] = q_x + np.dot(Q_xu, k[t]) + np.dot(K[t].T, q_u) + np.dot(np.dot(K[t].T, Q_uu), k[t])
self.std.append(inv(Q_uu))
t -= 1
self.K = K
self.k = k
def get_action_one_step(self, state, t):
mean = np.dot(self.K[t], state) + self.k[t]
# todo remove random here
if np.isclose(0.0, self.std[t]).all() is False:
return np.clip(np.random.normal(mean, self.std[t]), self.control_low, self.control_high)
else:
return np.clip(mean, self.control_low, self.control_high)
class LQRPolicy(DeterministicPolicy):
@typechecked
def __init__(self, env_spec: EnvSpec, T: int, cost_fn: CostFunc,
dynamics: LinearDynamicsModel):
param = Parameters(parameters=dict(T=T))
super().__init__(env_spec, param)
self.dynamics = dynamics
self.Lqr_instance = LQR(env_spec=env_spec,
T=self.parameters('T'),
dyna_model=dynamics,
cost_fn=cost_fn)
def forward(self, obs, **kwargs):
obs = make_batch(obs, original_shape=self.env_spec.obs_shape).tolist()
action = []
if 'step' in kwargs:
step = kwargs['step']
else:
step = None
for obs_i in obs:
action_i = self._forward(obs_i, step=step)
action.append(action_i)
return np.array(action)
def copy_from(self, obj) -> bool:
super().copy_from(obj)
self.parameters.copy_from(obj.parameters)
return True
def make_copy(self, *args, **kwargs):
return LQRPolicy(env_spec=self.env_spec,
T=self.parameters('T'),
cost_fn=self.Lqr_instance.cost_fn,
dynamics=self.dynamics.make_copy())
def init(self, source_obj=None):
self.parameters.init()
if source_obj:
self.copy_from(obj=source_obj)
def get_status(self):
return super().get_status()
    def _forward(self, obs, step=None):
self.Lqr_instance.backward(np.array([obs]), np.array([self.action_space.sample()]))
x = obs
u = self.Lqr_instance.get_action_one_step(x, t=step if step else 0)
return u
@property
def T(self):
return self.parameters('T')
| 5,954 | 33.224138 | 107 | py |
baconian-project | baconian-project-master/baconian/algo/policy/deterministic_mlp.py | from baconian.algo.policy.policy import DeterministicPolicy
from baconian.core.core import EnvSpec
import overrides
import tensorflow as tf
from baconian.tf.mlp import MLP
from baconian.tf.tf_parameters import ParametersWithTensorflowVariable
from baconian.common.special import *
from baconian.algo.utils import _get_copy_arg_with_tf_reuse
from baconian.algo.misc.placeholder_input import PlaceholderInput
class DeterministicMLPPolicy(DeterministicPolicy, PlaceholderInput):
def __init__(self, env_spec: EnvSpec,
name_scope: str,
name: str, mlp_config: list,
input_norm: np.ndarray = None,
output_norm: np.ndarray = None,
output_low: np.ndarray = None,
output_high: np.ndarray = None,
reuse=False):
DeterministicPolicy.__init__(self, env_spec=env_spec, name=name, parameters=None)
obs_dim = env_spec.flat_obs_dim
action_dim = env_spec.flat_action_dim
assert action_dim == mlp_config[-1]['N_UNITS']
with tf.variable_scope(name_scope):
state_input = tf.placeholder(shape=[None, obs_dim], dtype=tf.float32, name='state_ph')
mlp_kwargs = dict(
reuse=reuse,
input_norm=input_norm,
output_norm=output_norm,
output_low=output_low,
output_high=output_high,
mlp_config=mlp_config,
name_scope=name_scope
)
mlp_net = MLP(input_ph=state_input,
**mlp_kwargs,
net_name='deterministic_mlp_policy')
PlaceholderInput.__init__(self, parameters=None)
self.parameters = ParametersWithTensorflowVariable(tf_var_list=mlp_net.var_list,
rest_parameters=mlp_kwargs,
name='deterministic_mlp_policy_tf_param')
self.state_input = state_input
self.mlp_net = mlp_net
self.action_tensor = mlp_net.output
        self.mlp_config = mlp_config
self.input_norm = input_norm
self.output_norm = output_norm
self.output_low = output_low
self.output_high = output_high
self.name_scope = name_scope
def forward(self, obs: (np.ndarray, list), sess=None, feed_dict=None, **kwargs):
obs = make_batch(obs, original_shape=self.env_spec.obs_shape)
feed_dict = {} if feed_dict is None else feed_dict
feed_dict = {
**feed_dict,
self.state_input: obs,
**self.parameters.return_tf_parameter_feed_dict()
}
sess = sess if sess else tf.get_default_session()
res = sess.run(self.action_tensor, feed_dict=feed_dict)
res = np.clip(res, a_min=self.env_spec.action_space.low, a_max=self.env_spec.action_space.high)
return res
def copy_from(self, obj) -> bool:
return PlaceholderInput.copy_from(self, obj)
def make_copy(self, *args, **kwargs):
kwargs = _get_copy_arg_with_tf_reuse(obj=self, kwargs=kwargs)
copy_mlp_policy = DeterministicMLPPolicy(env_spec=self.env_spec,
input_norm=self.input_norm,
output_norm=self.output_norm,
output_low=self.output_low,
output_high=self.output_high,
mlp_config=self.mlp_config,
**kwargs)
return copy_mlp_policy
def save(self, *args, **kwargs):
return PlaceholderInput.save(self, *args, **kwargs)
def load(self, *args, **kwargs):
return PlaceholderInput.load(self, *args, **kwargs)
| 3,897 | 41.369565 | 103 | py |
baconian-project | baconian-project-master/baconian/algo/policy/constant_action_policy.py | from baconian.algo.policy.policy import DeterministicPolicy
from baconian.core.core import EnvSpec
from baconian.core.parameters import Parameters
from baconian.config.global_config import GlobalConfig
from baconian.config.dict_config import DictConfig
from baconian.common.misc import *
from copy import deepcopy
class ConstantActionPolicy(DeterministicPolicy):
required_key_dict = DictConfig.load_json(file_path=GlobalConfig().DEFAULT_CONSTANT_ACTION_POLICY_REQUIRED_KEY_LIST)
def __init__(self, env_spec: EnvSpec, config_or_config_dict: (DictConfig, dict), name='policy'):
config = construct_dict_config(config_or_config_dict, self)
parameters = Parameters(parameters=dict(),
source_config=config)
assert env_spec.action_space.contains(x=config('ACTION_VALUE'))
super().__init__(env_spec, parameters, name)
self.config = config
def forward(self, *args, **kwargs):
action = self.env_spec.action_space.unflatten(self.parameters('ACTION_VALUE'))
assert self.env_spec.action_space.contains(x=action)
return action
def copy_from(self, obj) -> bool:
super().copy_from(obj)
self.parameters.copy_from(obj.parameters)
return True
def make_copy(self, *args, **kwargs):
return ConstantActionPolicy(env_spec=self.env_spec,
config_or_config_dict=deepcopy(self.config),
*args, **kwargs)
| 1,502 | 41.942857 | 119 | py |
baconian-project | baconian-project-master/baconian/algo/dynamics/dynamics_model.py | import tensorflow as tf
from baconian.core.core import Basic, EnvSpec
import numpy as np
import abc
from baconian.core.parameters import Parameters
from typeguard import typechecked
from tensorflow.python.ops.parallel_for.gradients import batch_jacobian as tf_batch_jacobian
from baconian.common.logging import Recorder
from baconian.core.status import register_counter_info_to_status_decorator, StatusWithSingleInfo
from baconian.common.logging import ConsoleLogger
from baconian.common.error import *
from baconian.core.core import EnvSpec, Env
from baconian.algo.dynamics.reward_func.reward_func import RewardFunc
from baconian.algo.dynamics.terminal_func.terminal_func import TerminalFunc
from baconian.common.data_pre_processing import DataScaler, IdenticalDataScaler
class DynamicsModel(Basic):
STATUS_LIST = ('CREATED', 'INITED')
INIT_STATUS = 'CREATED'
def __init__(self, env_spec: EnvSpec, parameters: Parameters = None, init_state=None, name='dynamics_model',
state_input_scaler: DataScaler = None,
action_input_scaler: DataScaler = None,
state_output_scaler: DataScaler = None):
"""
:param env_spec: environment specifications, such as observation space and action space
:type env_spec: EnvSpec
:param parameters: parameters
:type parameters: Parameters
        :param init_state: initial state of the dynamics model
        :type init_state: np.ndarray
:param name: name of instance, 'dynamics_model' by default
:type name: str
:param state_input_scaler: data preprocessing scaler of state input
:type state_input_scaler: DataScaler
:param action_input_scaler: data preprocessing scaler of action input
:type action_input_scaler: DataScaler
:param state_output_scaler: data preprocessing scaler of state output
:type state_output_scaler: DataScaler
"""
super().__init__(name=name)
self.env_spec = env_spec
self.state = init_state
self.parameters = parameters
self.state_input = None
self.action_input = None
self.new_state_output = None
self.recorder = Recorder(flush_by_split_status=False, default_obj=self)
self._status = StatusWithSingleInfo(obj=self)
self.state_input_scaler = state_input_scaler if state_input_scaler else IdenticalDataScaler(
dims=env_spec.flat_obs_dim)
self.action_input_scaler = action_input_scaler if action_input_scaler else IdenticalDataScaler(
dims=env_spec.flat_action_dim)
self.state_output_scaler = state_output_scaler if state_output_scaler else IdenticalDataScaler(
dims=env_spec.flat_obs_dim)
def init(self, *args, **kwargs):
self.set_status('INITED')
self.state = self.env_spec.obs_space.sample()
@register_counter_info_to_status_decorator(increment=1, info_key='step_counter')
def step(self, action: np.ndarray, state=None, allow_clip=False, **kwargs_for_transit):
"""
        State transition function (only supports a single-sample transition, not batched data)
:param action: action to be taken
:type action: np.ndarray
:param state: current state, if None, will use stored state (saved from last transition)
:type state: np.ndarray
:param allow_clip: allow clip of observation space, default False
:type allow_clip: bool
:param kwargs_for_transit: extra kwargs for calling the _state_transit, this is typically related to the
specific mode you used
:type kwargs_for_transit:
:return: new state after step
:rtype: np.ndarray
"""
state = np.array(state).reshape(self.env_spec.obs_shape) if state is not None else self.state
action = action.reshape(self.env_spec.action_shape)
if allow_clip is True:
if state is not None:
state = self.env_spec.obs_space.clip(state)
action = self.env_spec.action_space.clip(action)
if self.env_spec.action_space.contains(action) is False:
raise StateOrActionOutOfBoundError(
'action {} out of bound of {}'.format(action, self.env_spec.action_space.bound()))
if self.env_spec.obs_space.contains(state) is False:
raise StateOrActionOutOfBoundError(
'state {} out of bound of {}'.format(state, self.env_spec.obs_space.bound()))
new_state = self._state_transit(state=state, action=self.env_spec.flat_action(action),
**kwargs_for_transit)
if allow_clip is True:
new_state = self.env_spec.obs_space.clip(new_state)
if self.env_spec.obs_space.contains(new_state) is False:
raise StateOrActionOutOfBoundError(
'new state {} out of bound of {}'.format(new_state, self.env_spec.obs_space.bound()))
self.state = new_state
return new_state
@abc.abstractmethod
def _state_transit(self, state, action, **kwargs) -> np.ndarray:
"""
:param state: original state
:type state: np.ndarray
:param action: action taken by agent
:type action: np.ndarray
:param kwargs:
:type kwargs:
:return: new state after transition
:rtype: np.ndarray
"""
raise NotImplementedError
def copy_from(self, obj) -> bool:
"""
:param obj: object to copy from
:type obj:
:return: True if successful else raise an error
:rtype: bool
"""
if not isinstance(obj, type(self)):
raise TypeError('Wrong type of obj %s to be copied, which should be %s' % (type(obj), type(self)))
return True
def make_copy(self):
""" Make a copy of parameters and environment specifications."""
raise NotImplementedError
def reset_state(self, state=None):
"""
        :param state: state to reset to; if None, a random state is sampled from the observation space
        :type state: np.ndarray
"""
if state is not None:
assert self.env_spec.obs_space.contains(state)
self.state = state
else:
self.state = self.env_spec.obs_space.sample()
def return_as_env(self) -> Env:
"""
:return: an environment with this dynamics model
:rtype: DynamicsEnvWrapper
"""
return DynamicsEnvWrapper(dynamics=self,
name=self._name + '_env')
class LocalDyanmicsModel(DynamicsModel):
pass
class GlobalDynamicsModel(DynamicsModel):
pass
class TrainableDyanmicsModel(object):
def train(self, *args, **kwargs):
raise NotImplementedError
class DifferentiableDynamics(object):
@typechecked
def __init__(self, input_node_dict: dict, output_node_dict: dict):
for node in input_node_dict.values():
if not isinstance(node, tf.Tensor):
raise TypeError('Derivable only support tf.Tensor as node')
for node in output_node_dict.values():
if not isinstance(node, tf.Tensor):
raise TypeError('Derivable only support tf.Tensor as node')
self.input_node_dict = input_node_dict
self.output_node_dict = output_node_dict
self.output_node_list = []
for key in output_node_dict.keys():
self.output_node_list.append(output_node_dict[key])
self._grad_dict = [{}, {}, {}]
for val in input_node_dict:
self._grad_dict[0][val] = self.output_node_list
def grad_on_input(self, key_or_node: (str, tf.Tensor), order=1, batch_flag=False):
if batch_flag:
raise NotImplementedError
node = key_or_node if isinstance(key_or_node, tf.Tensor) else self.input_node_dict[key_or_node]
        if node not in self._grad_dict[order]:
if order == 1:
grad_op = [tf_batch_jacobian(output=o_node, inp=node) for o_node in self.output_node_list]
else:
grad_op = [self.split_and_hessian(out_node=o_node, innode=node) for o_node in self.output_node_list]
self._grad_dict[order][node] = grad_op
return grad_op
else:
return self._grad_dict[order][node]
def split_and_hessian(self, out_node, innode):
out_nodes = tf.split(out_node, 1, axis=1)
hessian_node = []
for o_node in out_nodes:
hessian_node.append(tf.stack(tf.hessians(o_node, innode)))
new_dim = len(hessian_node[0].shape.as_list()) + 1
new_dim = list(range(new_dim))
new_dim[0] = 1
new_dim[1] = 0
return tf.transpose(tf.stack(hessian_node), perm=new_dim)
class DynamicsEnvWrapper(Env):
"""
    A wrapper that wraps a dynamics model into a standard baconian Env
"""
@typechecked
def __init__(self, dynamics: DynamicsModel, name: str = 'dynamics_env'):
super().__init__(name)
self._dynamics = dynamics
self._reward_func = None
self._terminal_func = None
self.env_spec = dynamics.env_spec
def step(self, action: np.ndarray, **kwargs):
super().step(action)
state = self.get_state() if 'state' not in kwargs else kwargs['state']
new_state = self._dynamics.step(action=action, **kwargs)
re = self._reward_func(state=state, new_state=new_state, action=action)
terminal = self._terminal_func(state=state, action=action, new_state=new_state)
return new_state, re, terminal, ()
def reset(self):
super(DynamicsEnvWrapper, self).reset()
self._dynamics.reset_state()
return self.get_state()
def init(self):
super().init()
self._dynamics.init()
def get_state(self):
return self._dynamics.state
def seed(self, seed=None):
ConsoleLogger().print('warning', 'seed on dynamics model has no effect ')
pass
def save(self, *args, **kwargs):
return self._dynamics.save(*args, **kwargs)
def load(self, *args, **kwargs):
return self._dynamics.load(*args, **kwargs)
def set_terminal_reward_func(self, terminal_func: TerminalFunc, reward_func: RewardFunc):
self._terminal_func = terminal_func
self._reward_func = reward_func
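# A minimal usage sketch for DynamicsEnvWrapper; the reward/terminal functions
# below are placeholders for any concrete RewardFunc / TerminalFunc:
#
#     env = some_dynamics_model.return_as_env()
#     env.set_terminal_reward_func(terminal_func=my_terminal_func,
#                                  reward_func=my_reward_func)
#     env.init()
#     obs = env.reset()
#     new_obs, reward, terminal, _ = env.step(action=env.env_spec.action_space.sample())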
class DynamicsPriorModel(Basic):
def __init__(self, env_spec: EnvSpec, parameters: Parameters, name: str):
super().__init__(name=name)
self.env_spec = env_spec
self.parameters = parameters
| 10,638 | 38.550186 | 116 | py |
baconian-project | baconian-project-master/baconian/algo/dynamics/random_dynamics_model.py | from baconian.core.core import EnvSpec
from baconian.algo.dynamics.dynamics_model import GlobalDynamicsModel
from baconian.core.parameters import Parameters
import numpy as np
class UniformRandomDynamicsModel(GlobalDynamicsModel):
"""
    A dynamics model that returns a uniformly sampled new state (via env_spec.obs_space.sample());
    useful for debugging.
"""
def __init__(self, env_spec: EnvSpec, parameters: Parameters = None, init_state=None, name='dynamics_model'):
super().__init__(env_spec, parameters, init_state, name)
def init(self):
super().init()
def _state_transit(self, state, action, **kwargs) -> np.ndarray:
return self.env_spec.obs_space.sample()
def make_copy(self):
return UniformRandomDynamicsModel(env_spec=self.env_spec)
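if __name__ == '__main__':
    # A minimal sketch (assuming gym and the Pendulum-v0 task are available):
    # build an EnvSpec from a gym environment and roll the random dynamics forward.
    from baconian.envs.gym_env import make
    env = make('Pendulum-v0')
    env_spec = EnvSpec(obs_space=env.observation_space, action_space=env.action_space)
    dynamics = UniformRandomDynamicsModel(env_spec=env_spec)
    dynamics.init()
    state = env_spec.obs_space.sample()
    for _ in range(5):
        state = dynamics.step(action=env_spec.action_space.sample(), state=state)
        print(state)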
| 807 | 31.32 | 113 | py |
baconian-project | baconian-project-master/baconian/algo/dynamics/gaussian_mixture_dynamics_prior.py | from baconian.core.core import EnvSpec
from baconian.algo.dynamics.dynamics_model import DynamicsPriorModel
import numpy as np
from math import inf
from baconian.common.sampler.sample_data import TransitionData
from baconian.algo.dynamics.third_party.gmm import GMM
from baconian.core.parameters import Parameters
class GaussianMixtureDynamicsPrior(DynamicsPriorModel):
"""
A dynamics prior encoded as a GMM over [x_t, u_t, x_t+1] points.
See:
S. Levine*, C. Finn*, T. Darrell, P. Abbeel, "End-to-end
training of Deep Visuomotor Policies", arXiv:1504.00702,
Appendix A.3.
"""
def __init__(self, env_spec: EnvSpec, batch_data: TransitionData = None, epsilon=inf, init_sequential=False,
eigreg=False, warmstart=True, name_scope='gp_dynamics_model', min_samples_per_cluster=40,
max_clusters=20, strength=1,
name='gp_dynamics_model'):
parameters = Parameters(
dict(min_samp=min_samples_per_cluster, max_samples=inf, max_clusters=max_clusters, strength=strength,
init_sequential=init_sequential, eigreg=eigreg, warmstart=warmstart))
super().__init__(env_spec=env_spec, parameters=parameters, name=name)
self.name_scope = name_scope
self.batch_data = batch_data
self.gmm_model = GMM(epsilon=epsilon, init_sequential=init_sequential, eigreg=eigreg, warmstart=warmstart)
self.X, self.U = None, None
def init(self):
pass
def update(self, batch_data: TransitionData = None):
"""
Update prior with additional data.
:param batch_data: data used to update GMM prior
:return: None
"""
# Format Data
xux, K = self._prepare_data(batch_data)
# Update GMM.
self.gmm_model.update(xux, K)
def eval(self, batch_data: TransitionData = None):
"""
Evaluate prior (prob of [x(t), u(t), x(t+1)] given gmm)
:param batch_data: data used to evaluate the prior with.
:return: parameters mu0, Phi, m, n0 as defined in the paper.
"""
# Format Data
xux, _ = self._prepare_data(batch_data)
# Perform query and fix mean.
mu0, Phi, m, n0 = self.gmm_model.inference(xux)
# Factor in multiplier.
n0 = n0 * self.parameters('strength')
m = m * self.parameters('strength')
# Multiply Phi by m (since it was normalized before).
Phi *= m
return mu0, Phi, m, n0
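    # Interpretation of the returned values (following the GPS code this prior
    # is adapted from): mu0 and Phi are the mean and (scaled) covariance of a
    # normal-inverse-Wishart prior over the joint [x_t, u_t, x_t+1] vector,
    # while m and n0 act as the equivalent sample counts (strengths) of the
    # mean and covariance parts; the 'strength' parameter simply rescales both.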
    def _prepare_data(self, batch_data: TransitionData = None, **kwargs):
if self.batch_data is None:
self.batch_data = batch_data
X = batch_data.state_set
if X.ndim == 2: X = np.expand_dims(X, axis=0)
U = batch_data.action_set
if U.ndim == 2: U = np.expand_dims(U, axis=0)
# Constants.
T = X.shape[1] - 1
# Append data to dataset.
if self.X is None:
self.X = X
else:
self.X = np.concatenate([self.X, X], axis=0)
if self.U is None:
self.U = U
else:
self.U = np.concatenate([self.U, U], axis=0)
# Remove excess samples from dataset.
start = max(0, self.X.shape[0] - self.parameters('max_samples') + 1)
self.X = self.X[start:, :]
self.U = self.U[start:, :]
# Compute cluster dimensionality.
Do = X.shape[2] + U.shape[2] + X.shape[2] # TODO: Use Xtgt.
# Create dataset.
N = self.X.shape[0]
xux = np.reshape(
np.c_[self.X[:, :T, :], self.U[:, :T, :], self.X[:, 1:(T + 1), :]],
[T * N, Do]
)
# Choose number of clusters.
K = int(max(2, min(self.parameters('max_clusters'), np.floor(float(N * T) / self.parameters('min_samp')))))
return xux, K
| 3,846 | 33.657658 | 115 | py |
baconian-project | baconian-project-master/baconian/algo/dynamics/linear_dynamics_model.py | from baconian.common.data_pre_processing import DataScaler
from baconian.core.core import EnvSpec
from baconian.algo.dynamics.dynamics_model import GlobalDynamicsModel, TrainableDyanmicsModel
from baconian.core.parameters import Parameters
import numpy as np
from copy import deepcopy
from sklearn.linear_model import LinearRegression
from baconian.common.sampler.sample_data import TransitionData
class LinearDynamicsModel(GlobalDynamicsModel):
"""
    A linear dynamics model defined by the transition matrix F and the bias f (it cannot be trained; use LinearRegressionDynamicsModel instead if you want to fit one)
"""
def __init__(self, env_spec: EnvSpec, state_transition_matrix: np.array, bias: np.array, init_state=None,
name='dynamics_model', state_input_scaler: DataScaler = None, action_input_scaler: DataScaler = None,
state_output_scaler: DataScaler = None):
parameters = Parameters(parameters=dict(F=state_transition_matrix, f=bias))
super().__init__(env_spec, parameters, init_state, name, state_input_scaler, action_input_scaler,
state_output_scaler)
assert self.parameters('F').shape == \
(env_spec.obs_space.flat_dim, env_spec.obs_space.flat_dim + env_spec.action_space.flat_dim)
assert self.parameters('f').shape[0] == env_spec.obs_space.flat_dim
def _state_transit(self, state, action, **kwargs) -> np.ndarray:
new_state = np.dot(self.parameters('F'), np.concatenate((state, action))) + self.parameters('f')
return self.env_spec.obs_space.clip(new_state)
def make_copy(self):
return LinearDynamicsModel(env_spec=self.env_spec,
state_transition_matrix=deepcopy(self.parameters('F')),
bias=deepcopy(self.parameters('f')))
@property
def F(self):
return self.parameters('F')
@property
def f(self):
return self.parameters('f')
class LinearRegressionDynamicsModel(GlobalDynamicsModel, TrainableDyanmicsModel):
def __init__(self, env_spec: EnvSpec, init_state=None, name='dynamics_model',
state_input_scaler: DataScaler = None, action_input_scaler: DataScaler = None,
state_output_scaler: DataScaler = None):
super().__init__(env_spec=env_spec, init_state=init_state, name=name,
state_input_scaler=state_input_scaler,
action_input_scaler=action_input_scaler,
state_output_scaler=state_output_scaler)
self._linear_model = LinearRegression(fit_intercept=True,
normalize=False)
def _state_transit(self, state, action, **kwargs) -> np.ndarray:
state = self.state_input_scaler.process(np.array(state).reshape(self.env_spec.obs_shape))
action = self.action_input_scaler.process(action.reshape(self.env_spec.flat_action_dim))
new_state = self._linear_model.predict(np.concatenate([state, action], axis=-1).reshape(1, -1))
new_state = np.clip(self.state_output_scaler.inverse_process(new_state),
self.env_spec.obs_space.low,
self.env_spec.obs_space.high).squeeze()
return new_state
    def train(self, batch_data: TransitionData = None, **kwargs):
self.state_input_scaler.update_scaler(batch_data.state_set)
self.action_input_scaler.update_scaler(batch_data.action_set)
self.state_output_scaler.update_scaler(batch_data.new_state_set)
state = self.state_input_scaler.process(batch_data.state_set)
action = self.action_input_scaler.process(batch_data.action_set)
        new_state = self.state_output_scaler.process(batch_data.new_state_set)
self._linear_model.fit(X=np.concatenate([state, action], axis=-1),
y=new_state)
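if __name__ == '__main__':
    # A minimal sketch of the analytic model x' = F [x; u] + f on a hand-made
    # double-integrator-like system.  The Box spaces below are an assumption
    # for illustration (low/high arrays as in gym); any EnvSpec with matching
    # dimensions works.
    from baconian.common.spaces.box import Box
    dt = 0.05
    F = np.array([[1.0, dt, 0.0],
                  [0.0, 1.0, dt]])
    f = np.zeros(2)
    env_spec = EnvSpec(obs_space=Box(low=np.array([-10.0, -10.0]), high=np.array([10.0, 10.0])),
                       action_space=Box(low=np.array([-1.0]), high=np.array([1.0])))
    model = LinearDynamicsModel(env_spec=env_spec, state_transition_matrix=F, bias=f)
    model.init()
    print(model.step(action=env_spec.action_space.sample(), state=np.array([0.0, 0.0])))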
| 3,928 | 49.371795 | 150 | py |
baconian-project | baconian-project-master/baconian/algo/dynamics/mlp_dynamics_model.py | from baconian.common.special import flatten_n
from baconian.core.core import EnvSpec
from baconian.algo.dynamics.dynamics_model import GlobalDynamicsModel, DifferentiableDynamics, TrainableDyanmicsModel
import tensorflow as tf
from baconian.tf.tf_parameters import ParametersWithTensorflowVariable
from baconian.tf.mlp import MLP
from baconian.common.sampler.sample_data import TransitionData
from typeguard import typechecked
import numpy as np
from baconian.tf.util import *
from baconian.algo.misc.placeholder_input import PlaceholderInput
import overrides
from baconian.common.error import *
from baconian.common.logging import record_return_decorator
from baconian.core.status import register_counter_info_to_status_decorator, StatusWithSubInfo
from baconian.common.spaces.box import Box
from baconian.common.data_pre_processing import DataScaler, IdenticalDataScaler
class ContinuousMLPGlobalDynamicsModel(GlobalDynamicsModel, DifferentiableDynamics, PlaceholderInput,
TrainableDyanmicsModel):
STATUS_LIST = GlobalDynamicsModel.STATUS_LIST + ('TRAIN',)
INIT_STATUS = 'CREATED'
def __init__(self, env_spec: EnvSpec,
name_scope: str,
name: str,
mlp_config: list,
learning_rate: float,
state_input_scaler: DataScaler = None,
action_input_scaler: DataScaler = None,
output_delta_state_scaler: DataScaler = None,
init_state=None):
if not isinstance(env_spec.obs_space, Box):
raise TypeError('ContinuousMLPGlobalDynamicsModel only support to predict state that hold space Box type')
GlobalDynamicsModel.__init__(self,
env_spec=env_spec,
parameters=None,
name=name,
state_input_scaler=state_input_scaler,
action_input_scaler=action_input_scaler,
init_state=init_state)
with tf.variable_scope(name_scope):
state_input = tf.placeholder(shape=[None, env_spec.flat_obs_dim], dtype=tf.float32, name='state_ph')
action_input = tf.placeholder(shape=[None, env_spec.flat_action_dim], dtype=tf.float32,
name='action_ph')
mlp_input_ph = tf.concat([state_input, action_input], axis=1, name='state_action_input')
delta_state_label_ph = tf.placeholder(shape=[None, env_spec.flat_obs_dim], dtype=tf.float32,
name='delta_state_label_ph')
mlp_net = MLP(input_ph=mlp_input_ph,
reuse=False,
mlp_config=mlp_config,
name_scope=name_scope,
net_name='mlp')
if mlp_net.output.shape[1] != env_spec.flat_obs_dim:
raise InappropriateParameterSetting(
"mlp output dims {} != env spec obs dim {}".format(mlp_net.output.shape[1], env_spec.flat_obs_dim))
parameters = ParametersWithTensorflowVariable(tf_var_list=mlp_net.var_list,
name=name + '_''mlp_continuous_dynamics_model',
rest_parameters=dict(learning_rate=learning_rate))
DifferentiableDynamics.__init__(self,
input_node_dict=dict(state_input=state_input,
action_action_input=action_input),
output_node_dict=dict(delta_state_output=mlp_net.output))
PlaceholderInput.__init__(self, parameters=parameters)
self.mlp_config = mlp_config
self.name_scope = name_scope
self.action_input = action_input
self.state_input = state_input
self.mlp_input_ph = mlp_input_ph
self.delta_state_label_ph = delta_state_label_ph
self.delta_state_output = mlp_net.output
self.mlp_net = mlp_net
self.output_delta_state_scaler = output_delta_state_scaler if output_delta_state_scaler else IdenticalDataScaler(
dims=self.env_spec.flat_obs_dim)
self._status = StatusWithSubInfo(obj=self)
with tf.variable_scope(name_scope):
with tf.variable_scope('train'):
self.loss, self.optimizer, self.optimize_op = self._setup_loss()
train_var_list = get_tf_collection_var_list(key=tf.GraphKeys.GLOBAL_VARIABLES,
scope='{}/train'.format(
name_scope)) + self.optimizer.variables()
self.parameters.set_tf_var_list(sorted(list(set(train_var_list)), key=lambda x: x.name))
def init(self, source_obj=None):
self.parameters.init()
if source_obj:
self.copy_from(obj=source_obj)
GlobalDynamicsModel.init(self)
@register_counter_info_to_status_decorator(increment=1, info_key='step')
def step(self, action: np.ndarray, state=None, **kwargs_for_transit):
return super().step(action, state, **kwargs_for_transit)
@record_return_decorator(which_recorder='self')
@register_counter_info_to_status_decorator(increment=1, info_key='train_counter', under_status='TRAIN')
def train(self, batch_data: TransitionData, **kwargs) -> dict:
self.set_status('TRAIN')
self.state_input_scaler.update_scaler(batch_data.state_set)
self.action_input_scaler.update_scaler(batch_data.action_set)
self.output_delta_state_scaler.update_scaler(batch_data.new_state_set - batch_data.state_set)
tf_sess = kwargs['sess'] if ('sess' in kwargs and kwargs['sess']) else tf.get_default_session()
train_iter = self.parameters('train_iter') if 'train_iter' not in kwargs else kwargs['train_iter']
feed_dict = {
self.state_input: self.state_input_scaler.process(batch_data.state_set),
self.action_input: self.action_input_scaler.process(
flatten_n(self.env_spec.action_space, batch_data.action_set)),
self.delta_state_label_ph: self.output_delta_state_scaler.process(
batch_data.new_state_set - batch_data.state_set),
**self.parameters.return_tf_parameter_feed_dict()
}
average_loss = 0.0
for i in range(train_iter):
loss, _ = tf_sess.run([self.loss, self.optimize_op],
feed_dict=feed_dict)
average_loss += loss
return dict(average_loss=average_loss / train_iter)
def save(self, *args, **kwargs):
return PlaceholderInput.save(self, *args, **kwargs)
def load(self, *args, **kwargs):
return PlaceholderInput.load(self, *args, **kwargs)
def copy_from(self, obj: PlaceholderInput) -> bool:
return PlaceholderInput.copy_from(self, obj)
def _state_transit(self, state, action, **kwargs) -> np.ndarray:
state = self.state_input_scaler.process(
np.array(state).reshape(self.env_spec.obs_shape)) if state is not None else self.state
action = self.action_input_scaler.process(self.env_spec.flat_action(action))
if 'sess' in kwargs:
tf_sess = kwargs['sess']
else:
tf_sess = tf.get_default_session()
if len(state.shape) < 2:
state = np.expand_dims(state, 0)
if len(action.shape) < 2:
action = np.expand_dims(action, 0)
delta_state = tf_sess.run(self.delta_state_output,
feed_dict={
self.action_input: action,
self.state_input: state
})
new_state = np.clip(
np.squeeze(self.output_delta_state_scaler.inverse_process(data=np.squeeze(delta_state)) + state),
self.env_spec.obs_space.low, self.env_spec.obs_space.high)
return new_state
def _setup_loss(self):
reg_loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope=self.name_scope)
loss = tf.reduce_sum((self.mlp_net.output - self.delta_state_label_ph) ** 2)
if len(reg_loss) > 0:
loss += tf.reduce_sum(reg_loss)
optimizer = tf.train.AdamOptimizer(learning_rate=self.parameters('learning_rate'))
optimize_op = optimizer.minimize(loss=loss, var_list=self.parameters('tf_var_list'))
return loss, optimizer, optimize_op
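if __name__ == '__main__':
    # A minimal sketch of a one-step prediction (assuming gym and Pendulum-v0
    # are available).  The mlp_config and the names below are illustrative
    # only; note that the last layer must output env_spec.flat_obs_dim units
    # (3 for Pendulum-v0).
    from baconian.envs.gym_env import make
    env = make('Pendulum-v0')
    env_spec = EnvSpec(obs_space=env.observation_space, action_space=env.action_space)
    model = ContinuousMLPGlobalDynamicsModel(
        env_spec=env_spec,
        name_scope='demo_mlp_dyna',
        name='demo_mlp_dyna',
        learning_rate=0.01,
        mlp_config=[
            {"ACT": "RELU", "B_INIT_VALUE": 0.0, "NAME": "1", "N_UNITS": 16,
             "TYPE": "DENSE", "W_NORMAL_STDDEV": 0.03},
            {"ACT": "LINEAR", "B_INIT_VALUE": 0.0, "NAME": "OUTPUT", "N_UNITS": 3,
             "TYPE": "DENSE", "W_NORMAL_STDDEV": 0.03}
        ])
    with tf.Session().as_default():
        model.init()
        print(model.step(action=env_spec.action_space.sample(), state=env_spec.obs_space.sample()))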
| 8,670 | 50.613095 | 121 | py |