# ==== MAgent-master/python/magent/builtin/tf_model/base.py ====
import os
import tensorflow as tf
from magent.model import BaseModel
class TFBaseModel(BaseModel):
"""base model for tensorflow model"""
def __init__(self, env, handle, name, subclass_name):
"""init a model
Parameters
----------
env: magent.Environment
handle: handle (ctypes.c_int32)
name: str
subclass_name: str
name of subclass
"""
BaseModel.__init__(self, env, handle)
self.name = name
self.subclass_name = subclass_name
def save(self, dir_name, epoch):
"""save model to dir
Parameters
----------
dir_name: str
name of the directory
epoch: int
"""
if not os.path.exists(dir_name):
os.mkdir(dir_name)
dir_name = os.path.join(dir_name, self.name)
if not os.path.exists(dir_name):
os.mkdir(dir_name)
model_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.name)
saver = tf.train.Saver(model_vars)
saver.save(self.sess, os.path.join(dir_name, (self.subclass_name + "_%d") % epoch))
def load(self, dir_name, epoch=0, name=None):
"""save model to dir
Parameters
----------
dir_name: str
name of the directory
epoch: int
"""
if name is None or name == self.name: # the name of saved model is the same as ours
dir_name = os.path.join(dir_name, self.name)
model_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.name)
saver = tf.train.Saver(model_vars)
saver.restore(self.sess, os.path.join(dir_name, (self.subclass_name + "_%d") % epoch))
else: # load a checkpoint with different name
backup_graph = tf.get_default_graph()
kv_dict = {}
# load checkpoint from another saved graph
with tf.Graph().as_default(), tf.Session() as sess:
tf.train.import_meta_graph(os.path.join(dir_name, name, (self.subclass_name + "_%d") % epoch + ".meta"))
dir_name = os.path.join(dir_name, name)
model_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, name)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(model_vars)
saver.restore(sess, os.path.join(dir_name, (self.subclass_name + "_%d") % epoch))
for item in tf.global_variables():
kv_dict[item.name] = sess.run(item)
# assign to now graph
backup_graph.as_default()
model_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.name)
for item in model_vars:
old_name = item.name.replace(self.name, name)
self.sess.run(tf.assign(item, kv_dict[old_name]))
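
# A minimal, self-contained sketch (assuming TensorFlow 1.x, as above; all
# names are hypothetical demo names) of the rename-and-copy trick that load()
# uses when a checkpoint was saved under a different model name: restore the
# checkpoint in a scratch graph, dump its variables into a python dict, then
# tf.assign the recorded values into variables living under the new scope.
def _demo_rename_restore():
    import tempfile

    import tensorflow as tf

    ckpt = tempfile.mkdtemp() + "/old_model_0"

    # 1. save a variable under scope "old_model"
    with tf.Graph().as_default(), tf.Session() as sess:
        with tf.variable_scope("old_model"):
            tf.get_variable("w", initializer=tf.constant([1.0, 2.0]))
        sess.run(tf.global_variables_initializer())
        tf.train.Saver().save(sess, ckpt)

    # 2. restore the checkpoint in a scratch graph and record every value
    kv_dict = {}
    with tf.Graph().as_default(), tf.Session() as sess:
        saver = tf.train.import_meta_graph(ckpt + ".meta")
        saver.restore(sess, ckpt)
        for var in tf.global_variables():
            kv_dict[var.name] = sess.run(var)

    # 3. assign the recorded values into a graph whose scope is "new_model"
    with tf.Graph().as_default(), tf.Session() as sess:
        with tf.variable_scope("new_model"):
            w = tf.get_variable("w", shape=[2])
        sess.run(tf.assign(w, kv_dict[w.name.replace("new_model", "old_model")]))
        return sess.run(w)  # -> array([1., 2.], dtype=float32)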

# ==== MAgent-master/python/magent/builtin/tf_model/a2c.py ====
""" advantage actor critic """
import os
import numpy as np
import tensorflow as tf
from .base import TFBaseModel
class AdvantageActorCritic(TFBaseModel):
def __init__(self, env, handle, name, learning_rate=1e-3,
batch_size=64, reward_decay=0.99, eval_obs=None,
train_freq=1, value_coef=0.1, ent_coef=0.08, use_comm=False,
custom_view_space=None, custom_feature_space=None):
"""init a model
Parameters
----------
env: Environment
environment
handle: Handle (ctypes.c_int32)
handle of this group, can be got by env.get_handles
name: str
name of this model
learning_rate: float
batch_size: int
reward_decay: float
reward_decay in TD
eval_obs: numpy array
evaluation set of observation
train_freq: int
mean training times of a sample
ent_coef: float
weight of entropy loss in total loss
value_coef: float
weight of value loss in total loss
use_comm: bool
whether use CommNet
custom_feature_space: tuple
customized feature space
custom_view_space: tuple
customized view space
"""
TFBaseModel.__init__(self, env, handle, name, "tfa2c")
# ======================== set config ========================
self.env = env
self.handle = handle
self.name = name
self.view_space = custom_view_space or env.get_view_space(handle)
self.feature_space = custom_feature_space or env.get_feature_space(handle)
self.num_actions = env.get_action_space(handle)[0]
self.reward_decay = reward_decay
self.batch_size = batch_size
self.learning_rate= learning_rate
self.train_freq = train_freq # train time of every sample (s,a,r,s')
self.value_coef = value_coef # coefficient of value in the total loss
self.ent_coef = ent_coef # coefficient of entropy in the total loss
self.train_ct = 0
self.use_comm = use_comm
# ======================= build network =======================
with tf.name_scope(self.name):
self._create_network(self.view_space, self.feature_space)
# init tensorflow session
config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
config.gpu_options.allow_growth = True
self.sess = tf.Session(config=config)
self.sess.run(tf.global_variables_initializer())
# init training buffers
self.view_buf = np.empty((1,) + self.view_space)
self.feature_buf = np.empty((1,) + self.feature_space)
self.action_buf = np.empty(1, dtype=np.int32)
self.reward_buf = np.empty(1, dtype=np.float32)
def _commnet_block(self, n, hidden, skip, name, hidden_size):
"""a block of CommNet
Parameters
----------
n: int
number of agent
hidden: tf.tensor
hidden layer input
skip: tf.tensor
skip connection
name: str
hidden_size: int
"""
mask = (tf.ones((n, n)) - tf.eye(n))
mask *= tf.where(n > 1, 1.0 / (tf.cast(n, tf.float32) - 1.0), 0)
C = tf.get_variable(name + "_C", shape=(hidden_size, hidden_size))
H = tf.get_variable(name + "_H", shape=(hidden_size, hidden_size))
message = tf.matmul(mask, hidden)
return tf.tanh(tf.matmul(message, C) + tf.matmul(hidden, H) + skip)
def _commnet(self, n, dense, hidden_size, n_step=2):
""" CommNet Learning Multiagent Communication with Backpropagation by S. Sukhbaatar et al. NIPS 2016
Parameters
----------
n: int
number of agent
hidden_size: int
n_step: int
communication step
Returns
-------
h: tf.tensor
hidden units after CommNet
"""
skip = dense
h = dense
for i in range(n_step):
h = self._commnet_block(n, h, skip, "step_%d" % i, hidden_size)
return h
def _create_network(self, view_space, feature_space):
"""define computation graph of network
Parameters
----------
view_space: tuple
feature_space: tuple
the input shape
"""
# input
input_view = tf.placeholder(tf.float32, (None,) + view_space)
input_feature = tf.placeholder(tf.float32, (None,) + feature_space)
action = tf.placeholder(tf.int32, [None])
reward = tf.placeholder(tf.float32, [None])
num_agent = tf.placeholder(tf.int32, [])
kernel_num = [32, 32]
hidden_size = [256]
# fully connected
flatten_view = tf.reshape(input_view, [-1, np.prod([v.value for v in input_view.shape[1:]])])
h_view = tf.layers.dense(flatten_view, units=hidden_size[0], activation=tf.nn.relu)
h_emb = tf.layers.dense(input_feature, units=hidden_size[0], activation=tf.nn.relu)
dense = tf.concat([h_view, h_emb], axis=1)
dense = tf.layers.dense(dense, units=hidden_size[0] * 2, activation=tf.nn.relu)
if self.use_comm:
dense = self._commnet(num_agent, dense, dense.shape[-1].value)
policy = tf.layers.dense(dense, units=self.num_actions, activation=tf.nn.softmax)
policy = tf.clip_by_value(policy, 1e-10, 1-1e-10)
value = tf.layers.dense(dense, units=1)
value = tf.reshape(value, (-1,))
advantage = tf.stop_gradient(reward - value)
action_mask = tf.one_hot(action, self.num_actions)
log_policy = tf.log(policy + 1e-6)
log_prob = tf.reduce_sum(log_policy * action_mask, axis=1)
pg_loss = -tf.reduce_mean(advantage * log_prob)
vf_loss = self.value_coef * tf.reduce_mean(tf.square(reward - value))
neg_entropy = self.ent_coef * tf.reduce_mean(tf.reduce_sum(policy * log_policy, axis=1))
total_loss = pg_loss + vf_loss + neg_entropy
# train op (clip gradient)
optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
gradients, variables = zip(*optimizer.compute_gradients(total_loss))
gradients, _ = tf.clip_by_global_norm(gradients, 5.0)
self.train_op = optimizer.apply_gradients(zip(gradients, variables))
self.input_view = input_view
self.input_feature = input_feature
self.action = action
self.reward = reward
self.num_agent = num_agent
self.policy, self.value = policy, value
self.pg_loss, self.vf_loss, self.reg_loss = pg_loss, vf_loss, neg_entropy
self.total_loss = total_loss
def infer_action(self, raw_obs, ids, *args, **kwargs):
"""infer action for a batch of agents
Parameters
----------
raw_obs: tuple(numpy array, numpy array)
raw observation of agents tuple(views, features)
ids: numpy array
ids of agents
Returns
-------
acts: numpy array of int32
actions for agents
"""
view, feature = raw_obs[0], raw_obs[1]
n = len(view)
policy = self.sess.run(self.policy, {self.input_view: view,
self.input_feature: feature,
self.num_agent: n})
actions = np.arange(self.num_actions)
ret = np.empty(n, dtype=np.int32)
for i in range(n):
ret[i] = np.random.choice(actions, p=policy[i])
return ret
def train(self, sample_buffer, print_every=1000):
"""feed new data sample and train
Parameters
----------
sample_buffer: magent.utility.EpisodesBuffer
buffer contains samples
Returns
-------
loss: list
policy gradient loss, critic loss, entropy loss
value: float
estimated state value
"""
# calc buffer size
n = 0
for episode in sample_buffer.episodes():
n += len(episode.rewards)
# resize to the new size
self.view_buf.resize((n,) + self.view_space)
self.feature_buf.resize((n,) + self.feature_space)
self.action_buf.resize(n)
self.reward_buf.resize(n)
view, feature = self.view_buf, self.feature_buf
action, reward = self.action_buf, self.reward_buf
ct = 0
gamma = self.reward_decay
# collect episodes from multiple separate buffers to a continuous buffer
for episode in sample_buffer.episodes():
v, f, a, r = episode.views, episode.features, episode.actions, episode.rewards
m = len(episode.rewards)
r = np.array(r)
keep = self.sess.run(self.value, feed_dict={
self.input_view: [v[-1]],
self.input_feature: [f[-1]],
self.num_agent: 1
})[0]
for i in reversed(range(m)):
keep = keep * gamma + r[i]
r[i] = keep
view[ct:ct+m] = v
feature[ct:ct+m] = f
action[ct:ct+m] = a
reward[ct:ct+m] = r
ct += m
assert n == ct
# train
_, pg_loss, vf_loss, ent_loss, state_value = self.sess.run(
[self.train_op, self.pg_loss, self.vf_loss, self.reg_loss, self.value], feed_dict={
self.input_view: view,
self.input_feature: feature,
self.action: action,
self.reward: reward,
self.num_agent: len(reward)
})
print("sample", n, pg_loss, vf_loss, ent_loss)
return [pg_loss, vf_loss, ent_loss], np.mean(state_value)
def get_info(self):
"""get information of the model
Returns
-------
info: string
"""
return "a2c train_time: %d" % (self.train_ct)

# ==== MAgent-master/python/magent/builtin/tf_model/drqn.py ====
"""Deep recurrent Q network"""
import time
import os
import collections
import numpy as np
import tensorflow as tf
from .base import TFBaseModel
class DeepRecurrentQNetwork(TFBaseModel):
def __init__(self, env, handle, name,
batch_size=32, unroll_step=8, reward_decay=0.99, learning_rate=1e-4,
train_freq=1, memory_size=20000, target_update=2000, eval_obs=None,
use_dueling=True, use_double=True, use_episode_train=False,
custom_view_space=None, custom_feature_space=None):
"""init a model
Parameters
----------
env: Environment
environment
handle: Handle (ctypes.c_int32)
handle of this group, can be got by env.get_handles
name: str
name of this model
learning_rate: float
batch_size: int
reward_decay: float
reward_decay in TD
train_freq: int
mean training times of a sample
target_update: int
target will update every target_update batches
memory_size: int
capacity of the replay buffer
eval_obs: numpy array
evaluation set of observation
use_dueling: bool
whether use dueling q network
use_double: bool
whether use double q network
custom_feature_space: tuple
customized feature space
custom_view_space: tuple
customized view space
"""
TFBaseModel.__init__(self, env, handle, name, "tfdrqn")
# ======================== set config ========================
self.env = env
self.handle = handle
self.view_space = custom_view_space or env.get_view_space(handle)
self.feature_space = custom_feature_space or env.get_feature_space(handle)
self.num_actions = env.get_action_space(handle)[0]
self.batch_size = batch_size
self.unroll_step = unroll_step
self.handle = handle
self.name = name
self.learning_rate= learning_rate
self.train_freq = train_freq # train time of every sample (s,a,r,s')
self.target_update= target_update # target network update frequency
self.eval_obs = eval_obs
self.use_dueling = use_dueling
self.use_double = use_double
self.use_episode_train = use_episode_train
self.skip_error = 0
self.pad_before_len = unroll_step - 1
self.agent_states = {}
self.train_ct = 0
# ======================= build network =======================
# input place holder
self.target = tf.placeholder(tf.float32, [None])
self.input_view = tf.placeholder(tf.float32, (None,) + self.view_space, name="input_view")
self.input_feature = tf.placeholder(tf.float32, (None,) + self.feature_space, name="input_feature")
self.action = tf.placeholder(tf.int32, [None], name="action")
self.mask = tf.placeholder(tf.float32, [None], name="mask")
self.batch_size_ph = tf.placeholder(tf.int32, [])
self.unroll_step_ph = tf.placeholder(tf.int32, [])
# build graph
with tf.variable_scope(self.name):
with tf.variable_scope("eval_net_scope"):
self.eval_scope_name = tf.get_variable_scope().name
self.qvalues, self.state_in, self.rnn_state = \
self._create_network(self.input_view, self.input_feature)
with tf.variable_scope("target_net_scope"):
self.target_scope_name = tf.get_variable_scope().name
self.target_qvalues, self.target_state_in, self.target_rnn_state = \
self._create_network(self.input_view, self.input_feature)
# loss
self.gamma = reward_decay
self.actions_onehot = tf.one_hot(self.action, self.num_actions)
self.td_error = tf.square(
self.target - tf.reduce_sum(tf.multiply(self.actions_onehot, self.qvalues), axis=1)
)
#self.loss = tf.reduce_mean(self.td_error)
self.loss = tf.reduce_sum(self.td_error * self.mask) / tf.reduce_sum(self.mask)
# train op (clip gradient)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
gradients, variables = zip(*optimizer.compute_gradients(self.loss))
gradients, _ = tf.clip_by_global_norm(gradients, 10.0)
self.train_op = optimizer.apply_gradients(zip(gradients, variables))
# target network update op
self.update_target_op = []
t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.target_scope_name)
e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.eval_scope_name)
for i in range(len(t_params)):
self.update_target_op.append(tf.assign(t_params[i], e_params[i]))
# init tensorflow session
config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
config.gpu_options.allow_growth = True
self.sess = tf.Session(config=config)
self.sess.run(tf.global_variables_initializer())
# init memory buffers
self.memory_size = memory_size
self.replay_buffer_lens = collections.deque(maxlen=memory_size)
self.replay_buffer = collections.deque(maxlen=memory_size)
# item format [views, features, actions, rewards, terminals, masks, len]
# init training buffers
self.view_buf = np.empty((1,) + self.view_space)
self.feature_buf = np.empty((1,) + self.feature_space)
self.action_buf, self.reward_buf = np.empty(1, dtype=np.int32), np.empty(1)
self.terminal_buf = np.empty(1, dtype=np.bool)
def _create_network(self, input_view, input_feature, reuse=None):
"""define computation graph of network
Parameters
----------
input_view: tf.tensor
input_feature: tf.tensor
the input tensor
"""
kernel_num = [32, 32]
hidden_size = [256]
# conv
h_conv1 = tf.layers.conv2d(input_view, filters=kernel_num[0], kernel_size=3,
activation=tf.nn.relu, name="conv1", reuse=reuse)
h_conv2 = tf.layers.conv2d(h_conv1, filters=kernel_num[1], kernel_size=3,
activation=tf.nn.relu, name="conv2", reuse=reuse)
flatten_view = tf.reshape(h_conv2, [-1, np.prod([v.value for v in h_conv2.shape[1:]])])
h_view = tf.layers.dense(flatten_view, units=hidden_size[0], activation=tf.nn.relu,
name="dense_view", reuse=reuse)
h_emb = tf.layers.dense(input_feature, units=hidden_size[0], activation=tf.nn.relu,
name="dense_emb", reuse=reuse)
dense = tf.concat([h_view, h_emb], axis=1)
# RNN
state_size = hidden_size[0] * 2
rnn_cell = tf.contrib.rnn.GRUCell(num_units=state_size)
rnn_in = tf.reshape(dense, shape=[self.batch_size_ph, self.unroll_step_ph, state_size])
state_in = rnn_cell.zero_state(self.batch_size_ph, tf.float32)
rnn, rnn_state = tf.nn.dynamic_rnn(
cell=rnn_cell, inputs=rnn_in, dtype=tf.float32, initial_state=state_in
)
rnn = tf.reshape(rnn, shape=[-1, state_size])
if self.use_dueling:
value = tf.layers.dense(dense, units=1, name="dense_value", reuse=reuse)
advantage = tf.layers.dense(dense, units=self.num_actions, use_bias=False,
name="dense_advantage", reuse=reuse)
qvalues = value + advantage - tf.reduce_mean(advantage, axis=1, keep_dims=True)
else:
qvalues = tf.layers.dense(rnn, units=self.num_actions)
self.state_size = state_size
return qvalues, state_in, rnn_state
def _get_agent_states(self, ids):
"""get hidden state of agents"""
n = len(ids)
states = np.empty([n, self.state_size])
default = np.zeros([self.state_size])
for i in range(n):
states[i] = self.agent_states.get(ids[i], default)
return states
def _set_agent_states(self, ids, states):
"""set hidden state for agents"""
if len(ids) <= len(self.agent_states) * 0.5:
self.agent_states = {}
for i in range(len(ids)):
self.agent_states[ids[i]] = states[i]
def infer_action(self, raw_obs, ids, policy='e_greedy', eps=0):
"""infer action for a batch of agents
Parameters
----------
raw_obs: tuple(numpy array, numpy array)
raw observation of agents tuple(views, features)
ids: numpy array
ids of agents
policy: str
can be 'e_greedy' or 'greedy'
eps: float
used when policy is eps-greedy
Returns
-------
acts: numpy array of int32
actions for agents
"""
view, feature = raw_obs[0], raw_obs[1]
n = len(ids)
states = self._get_agent_states(ids)
qvalues, states = self.sess.run([self.qvalues, self.rnn_state], feed_dict={
self.input_view: view,
self.input_feature: feature,
self.state_in: states,
self.batch_size_ph: n,
self.unroll_step_ph: 1
})
self._set_agent_states(ids, states)
best_actions = np.argmax(qvalues, axis=1)
if policy == 'e_greedy':
random = np.random.randint(self.num_actions, size=(n,))
cond = np.random.uniform(0, 1, size=(n,)) < eps
ret = np.where(cond, random, best_actions)
elif policy == 'greedy':
ret = best_actions
return ret.astype(np.int32)
def _calc_target(self, next_view, next_feature, rewards, terminal, batch_size, unroll_step):
"""calculate target value"""
n = len(rewards)
if self.use_double:
t_qvalues, qvalues = self.sess.run([self.target_qvalues, self.qvalues], feed_dict={
self.input_view: next_view,
self.input_feature: next_feature,
# self.state_in: state_in,
# self.target_state_in: state_in,
self.batch_size_ph: batch_size,
self.unroll_step_ph: unroll_step})
# ignore the first value (the first value is for computing correct hidden state)
# t_qvalues = t_qvalues.reshape([-1, unroll_step, self.num_actions])
# t_qvalues = t_qvalues[:, 1:, :].reshape([-1, self.num_actions])
# qvalues = qvalues.reshape([-1, unroll_step, self.num_actions])
# qvalues = qvalues[:, 1:, :].reshape([-1, self.num_actions])
next_value = t_qvalues[np.arange(n), np.argmax(qvalues, axis=1)]
else:
t_qvalues = self.sess.run(self.target_qvalues, feed_dict={
self.input_view: next_view,
self.input_feature: next_feature,
# self.target_state_in: state_in,
self.batch_size_ph: batch_size,
self.unroll_step_ph: unroll_step})
# t_qvalues = t_qvalues.reshape([-1, unroll_step, self.num_actions])
# t_qvalues = t_qvalues[:,1:,:].reshape([-1, self.num_actions])
next_value = np.max(t_qvalues, axis=1)
target = np.where(terminal, rewards, rewards + self.gamma * next_value)
return target
def _add_to_replay_buffer(self, sample_buffer):
"""add samples in sample_buffer to replay buffer"""
n = 0
for episode in sample_buffer.episodes():
v, f, a, r = episode.views, episode.features, episode.actions, episode.rewards
m = len(r)
mask = np.ones((m,))
terminal = np.zeros((m,), dtype=np.bool)
if episode.terminal:
terminal[-1] = True
else:
mask[-1] = 0
item = [v, f, a, r, terminal, mask, m]
self.replay_buffer_lens.append(m)
self.replay_buffer.append(item)
n += m
return n
def train(self, sample_buffer, print_every=500):
""" add new samples in sample_buffer to replay buffer and train
do not keep hidden state (split episode into short sequences)
Parameters
----------
sample_buffer: magent.utility.EpisodesBuffer
buffer contains samples
print_every: int
print log every print_every batches
Returns
-------
loss: float
bellman residual loss
value: float
estimated state value
"""
add_num = self._add_to_replay_buffer(sample_buffer)
batch_size = self.batch_size
unroll_step = self.unroll_step
# calc sample weight of episodes (i.e. their lengths)
replay_buffer = self.replay_buffer
replay_lens_sum = np.sum(self.replay_buffer_lens)
weight = np.array(self.replay_buffer_lens, dtype=np.float32) / replay_lens_sum
n_batches = self.train_freq * add_num / (batch_size * (unroll_step - self.skip_error))
if n_batches == 0:
return 0, 0
max_ = batch_size * unroll_step
batch_view = np.zeros((max_+1,) + self.view_space, dtype=np.float32)
batch_feature = np.zeros((max_+1,) + self.feature_space, dtype=np.float32)
batch_action = np.zeros((max_,), dtype=np.int32)
batch_reward = np.zeros((max_,), dtype=np.float32)
batch_terminal = np.zeros((max_,), dtype=np.bool)
batch_mask = np.zeros((max_,), dtype=np.float32)
# calc batch number
n_batches = int(self.train_freq * add_num / (batch_size * (unroll_step - self.skip_error)))
print("batches: %d add: %d replay_len: %d/%d" %
(n_batches, add_num, len(self.replay_buffer), self.memory_size))
ct = 0
total_loss = 0
start_time = time.time()
# train batches
for i in range(n_batches):
indexes = np.random.choice(len(replay_buffer), self.batch_size, p=weight)
batch_mask[:] = 0
for j in range(batch_size):
item = replay_buffer[indexes[j]]
v, f, a, r, t = item[0], item[1], item[2], item[3], item[4]
length = len(v)
start = np.random.randint(length)
real_step = min(length - start, unroll_step)
beg = j * unroll_step
batch_view[beg:beg+real_step] = v[start:start+real_step]
batch_feature[beg:beg+real_step] = f[start:start+real_step]
batch_action[beg:beg+real_step] = a[start:start+real_step]
batch_reward[beg:beg+real_step] = r[start:start+real_step]
batch_terminal[beg:beg+real_step] = t[start:start+real_step]
batch_mask[beg:beg+real_step] = 1.0
if not t[start+real_step-1]:
batch_mask[beg+real_step-1] = 0
# collect trajectories from different IDs to a single buffer
target = self._calc_target(batch_view[1:], batch_feature[1:],
batch_reward, batch_terminal, batch_size, unroll_step)
ret = self.sess.run([self.train_op, self.loss], feed_dict={
self.input_view: batch_view[:-1],
self.input_feature: batch_feature[:-1],
self.action: batch_action,
self.target: target,
self.mask: batch_mask,
self.batch_size_ph: batch_size,
self.unroll_step_ph: unroll_step,
})
loss = ret[1]
total_loss += loss
if ct % self.target_update == 0:
self.sess.run(self.update_target_op)
if ct % print_every == 0:
print("batch %5d, loss %.6f, qvalue %.6f" % (ct, loss, self._eval(target)))
ct += 1
self.train_ct += 1
total_time = time.time() - start_time
step_average = total_time / max(1.0, (ct / 1000.0))
print("batches: %d, total time: %.2f, 1k average: %.2f" % (ct, total_time, step_average))
return total_loss / ct if ct != 0 else 0, self._eval(target)
def train_keep_hidden(self, sample_buffer, print_every=500):
""" add new samples in sample_buffer to replay buffer and train
keep hidden state (split episode into small sequence, but keep hidden states)
this means must train some episodes continuously not fully random.
to use this training scheme, you should also modify self._calc_target
Parameters
----------
sample_buffer: magent.utility.EpisodesBuffer
buffer contains samples
print_every: int
print log every print_every batches
Returns
-------
loss: float
bellman residual loss
value: float
estimated state value
"""
add_num = self._add_to_replay_buffer(sample_buffer)
batch_size = self.batch_size
unroll_step = self.unroll_step
# calc sample weight of episodes (i.e. their lengths)
replay_buffer = self.replay_buffer
replay_lens_sum = np.sum(self.replay_buffer_lens)
weight = np.array(self.replay_buffer_lens, dtype=np.float32) / replay_lens_sum
max_len = self._div_round(np.max(self.replay_buffer_lens), unroll_step)
n_batches = self.train_freq * add_num / (batch_size * unroll_step)
if n_batches == 0:
return 0, 0
# allocate buffer
max_ = batch_size * max_len
batch_view = np.zeros((max_+1,) + self.view_space, dtype=np.float32)
batch_feature = np.zeros((max_+1,) + self.feature_space, dtype=np.float32)
batch_action = np.zeros((max_,), dtype=np.int32)
batch_reward = np.zeros((max_,), dtype=np.float32)
batch_terminal = np.zeros((max_,), dtype=np.bool)
batch_mask = np.zeros((max_,), dtype=np.float32)
batch_hidden = np.zeros((batch_size, self.state_size), dtype=np.float32)
batch_pick = np.zeros((batch_size, max_len), dtype=np.bool)
pick_buffer = np.arange(max_, dtype=np.int32)
print("batches: %d add: %d replay_len: %d, %d/%d" %
(n_batches, add_num, replay_lens_sum, len(self.replay_buffer), self.memory_size))
start_time = time.time()
total_loss = 0
ct = 0
while ct < n_batches:
# random sample agent episodes (sequence)
indexs = np.random.choice(len(replay_buffer), self.batch_size, p=weight)
train_length = 0
to_sort = []
# collect length and sort
for j, index in enumerate(indexs):
length = replay_buffer[index][-1]
length = self._div_round(length, unroll_step)
train_length = max(train_length, length)
to_sort.append([index, length])
to_sort.sort(key=lambda x: -x[1])
# merge short episodes to long episodes (use greedy method)
merged = [False for _ in range(batch_size)]
rows = []
for j in range(len(to_sort)):
if merged[j]:
continue
row = [to_sort[j][0]]
now_len = to_sort[j][1]
if True: # use compress
for k in range(j+1, batch_size):
if now_len + to_sort[k][1] <= train_length:
row.append(to_sort[k][0])
now_len += to_sort[k][1]
merged[k] = True
rows.append(row)
n_rows = len(rows)
batch_reset = np.zeros((train_length, batch_size), dtype=np.bool)
batch_mask[:] = 0
# copy from replay buffer to batch buffer
for j, row in enumerate(rows):
beg = j * max_len
init_beg = beg
# fill a row
for index in row:
v, f, a, r, terminal, mask, x = replay_buffer[index]
batch_reset[(beg - init_beg)/unroll_step, j] = True
batch_view[beg:beg+x] = v
batch_feature[beg:beg+x] = f
batch_action[beg:beg+x] = a
batch_reward[beg:beg+x] = r
batch_terminal[beg:beg+x] = terminal
batch_mask[beg:beg+x] = mask
beg += self._div_round(x, unroll_step)
# train steps
for j in range((train_length + unroll_step - 1) / unroll_step):
batch_pick[:] = False
batch_pick[:n_rows, j * unroll_step:(j+1) * unroll_step] = True
pick = pick_buffer[batch_pick.reshape(-1)].reshape(n_rows, unroll_step)
next_pick = np.empty((n_rows, unroll_step + 1), dtype=np.int32) # next pick choose one more state than pick
next_pick[:, :unroll_step] = pick
next_pick[:, unroll_step] = pick[:, -1] + 1
pick = pick.reshape(-1)
next_pick = next_pick.reshape(-1)
steps = len(pick) / n_rows
assert steps > 0
if np.sum(batch_mask[pick]) < 1:
continue
batch_hidden[batch_reset[j]] = np.zeros_like(batch_hidden[0])
batch_target = self._calc_target(batch_view[next_pick], batch_feature[next_pick],
batch_reward[pick], batch_terminal[pick], batch_hidden[:n_rows],
n_rows, steps + 1)
ret = self.sess.run(
[self.train_op, self.loss, self.rnn_state],
feed_dict={
self.input_view: batch_view[pick],
self.input_feature: batch_feature[pick],
self.action: batch_action[pick],
self.target: batch_target,
self.mask: batch_mask[pick],
self.state_in: batch_hidden[:n_rows],
self.batch_size_ph: n_rows,
self.unroll_step_ph: steps
})
loss, batch_hidden[:n_rows] = ret[1], ret[2]
total_loss += loss
if ct % self.target_update == 0:
self.sess.run(self.update_target_op)
if ct % print_every == 0:
print("batches %5d, mask %d/%d (%d), loss %.6f, qvalue %.6f" %
(ct, sum(batch_mask), n_rows * train_length, n_rows, loss, self._eval(batch_target)))
ct += 1
self.train_ct += 1
total_time = time.time() - start_time
step_average = total_time / max(1.0, (ct / 1000.0))
print("batches: %d, total time: %.2f, 1k average: %.2f" % (ct, total_time, step_average))
return round(total_loss / ct if ct != 0 else 0, 6), self._eval(batch_target)
@staticmethod
def _div_round(x, divisor):
"""round up to nearest integer that are divisible by divisor"""
return (x + divisor - 1) / divisor * divisor
def _eval(self, target):
"""evaluate estimated q value"""
if self.eval_obs is None:
return np.mean(target)
else:
return np.mean(self.sess.run(self.target_qvalues, feed_dict={
self.input_view: self.eval_obs[0],
self.input_feature: self.eval_obs[1],
self.batch_size_ph: self.eval_obs[0].shape[0],
self.unroll_step_ph: 1
}))
def get_info(self):
"""get information of model"""
return "tfdrqn train_time: %d" % (self.train_ct)

# ==== MAgent-master/python/magent/builtin/tf_model/dqn.py ====
"""Deep q network"""
import time
import numpy as np
import tensorflow as tf
from .base import TFBaseModel
from ..common import ReplayBuffer
class DeepQNetwork(TFBaseModel):
def __init__(self, env, handle, name,
batch_size=64, learning_rate=1e-4, reward_decay=0.99,
train_freq=1, target_update=2000, memory_size=2 ** 20, eval_obs=None,
use_dueling=True, use_double=True, use_conv=True,
custom_view_space=None, custom_feature_space=None,
num_gpu=1, infer_batch_size=8192, network_type=0):
"""init a model
Parameters
----------
env: Environment
environment
handle: Handle (ctypes.c_int32)
handle of this group, can be got by env.get_handles
name: str
name of this model
learning_rate: float
batch_size: int
reward_decay: float
reward_decay in TD
train_freq: int
mean training times of a sample
target_update: int
target will update every target_update batches
memory_size: int
capacity of the replay buffer
eval_obs: numpy array
evaluation set of observation
use_dueling: bool
whether use dueling q network
use_double: bool
whether use double q network
use_conv: bool
use convolution or fully connected layer as state encoder
num_gpu: int
number of gpu
infer_batch_size: int
batch size while inferring actions
custom_feature_space: tuple
customized feature space
custom_view_space: tuple
customized view space
"""
TFBaseModel.__init__(self, env, handle, name, "tfdqn")
# ======================== set config ========================
self.env = env
self.handle = handle
self.view_space = custom_view_space or env.get_view_space(handle)
self.feature_space = custom_feature_space or env.get_feature_space(handle)
self.num_actions = env.get_action_space(handle)[0]
self.batch_size = batch_size
self.learning_rate= learning_rate
self.train_freq = train_freq # train time of every sample (s,a,r,s')
self.target_update= target_update # target network update frequency
self.eval_obs = eval_obs
self.infer_batch_size = infer_batch_size # maximum batch size when infer actions,
# change this to fit your GPU memory if you hit an OOM error
self.use_dueling = use_dueling
self.use_double = use_double
self.num_gpu = num_gpu
self.network_type = network_type
self.train_ct = 0
# ======================= build network =======================
# input place holder
self.target = tf.placeholder(tf.float32, [None])
self.weight = tf.placeholder(tf.float32, [None])
self.input_view = tf.placeholder(tf.float32, (None,) + self.view_space)
self.input_feature = tf.placeholder(tf.float32, (None,) + self.feature_space)
self.action = tf.placeholder(tf.int32, [None])
self.mask = tf.placeholder(tf.float32, [None])
self.eps = tf.placeholder(tf.float32) # e-greedy
# build graph
with tf.variable_scope(self.name):
with tf.variable_scope("eval_net_scope"):
self.eval_scope_name = tf.get_variable_scope().name
self.qvalues = self._create_network(self.input_view, self.input_feature, use_conv)
if self.num_gpu > 1: # build inference graph for multiple gpus
self._build_multi_gpu_infer(self.num_gpu)
with tf.variable_scope("target_net_scope"):
self.target_scope_name = tf.get_variable_scope().name
self.target_qvalues = self._create_network(self.input_view, self.input_feature, use_conv)
# loss
self.gamma = reward_decay
self.actions_onehot = tf.one_hot(self.action, self.num_actions)
td_error = tf.square(self.target - tf.reduce_sum(tf.multiply(self.actions_onehot, self.qvalues), axis=1))
self.loss = tf.reduce_sum(td_error * self.mask) / tf.reduce_sum(self.mask)
# train op (clip gradient)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
gradients, variables = zip(*optimizer.compute_gradients(self.loss))
gradients, _ = tf.clip_by_global_norm(gradients, 5.0)
self.train_op = optimizer.apply_gradients(zip(gradients, variables))
# output action
def out_action(qvalues):
best_action = tf.argmax(qvalues, axis=1)
best_action = tf.to_int32(best_action)
random_action = tf.random_uniform(tf.shape(best_action), 0, self.num_actions, tf.int32)
should_explore = tf.random_uniform(tf.shape(best_action), 0, 1) < self.eps
return tf.where(should_explore, random_action, best_action)
self.output_action = out_action(self.qvalues)
if self.num_gpu > 1:
self.infer_out_action = [out_action(qvalue) for qvalue in self.infer_qvalues]
# target network update op
self.update_target_op = []
t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.target_scope_name)
e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.eval_scope_name)
for i in range(len(t_params)):
self.update_target_op.append(tf.assign(t_params[i], e_params[i]))
# init tensorflow session
config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
config.gpu_options.allow_growth = True
self.sess = tf.Session(config=config)
self.sess.run(tf.global_variables_initializer())
# init replay buffers
self.replay_buf_len = 0
self.memory_size = memory_size
self.replay_buf_view = ReplayBuffer(shape=(memory_size,) + self.view_space)
self.replay_buf_feature = ReplayBuffer(shape=(memory_size,) + self.feature_space)
self.replay_buf_action = ReplayBuffer(shape=(memory_size,), dtype=np.int32)
self.replay_buf_reward = ReplayBuffer(shape=(memory_size,))
self.replay_buf_terminal = ReplayBuffer(shape=(memory_size,), dtype=np.bool)
self.replay_buf_mask = ReplayBuffer(shape=(memory_size,))
# if mask[i] == 0, then the item is used for padding, not for training
def _create_network(self, input_view, input_feature, use_conv=True, reuse=None):
"""define computation graph of network
Parameters
----------
input_view: tf.tensor
input_feature: tf.tensor
the input tensor
"""
kernel_num = [32, 32]
hidden_size = [256]
if use_conv: # convolution
h_conv1 = tf.layers.conv2d(input_view, filters=kernel_num[0], kernel_size=3,
activation=tf.nn.relu, name="conv1", reuse=reuse)
h_conv2 = tf.layers.conv2d(h_conv1, filters=kernel_num[1], kernel_size=3,
activation=tf.nn.relu, name="conv2", reuse=reuse)
flatten_view = tf.reshape(h_conv2, [-1, np.prod([v.value for v in h_conv2.shape[1:]])])
h_view = tf.layers.dense(flatten_view, units=hidden_size[0], activation=tf.nn.relu,
name="dense_view", reuse=reuse)
else: # fully connected
flatten_view = tf.reshape(input_view, [-1, np.prod([v.value for v in input_view.shape[1:]])])
h_view = tf.layers.dense(flatten_view, units=hidden_size[0], activation=tf.nn.relu)
h_emb = tf.layers.dense(input_feature, units=hidden_size[0], activation=tf.nn.relu,
name="dense_emb", reuse=reuse)
dense = tf.concat([h_view, h_emb], axis=1)
if self.use_dueling:
value = tf.layers.dense(dense, units=1, name="value", reuse=reuse)
advantage = tf.layers.dense(dense, units=self.num_actions, use_bias=False,
name="advantage", reuse=reuse)
qvalues = value + advantage - tf.reduce_mean(advantage, axis=1, keep_dims=True)
else:
qvalues = tf.layers.dense(dense, units=self.num_actions, name="value", reuse=reuse)
return qvalues
def infer_action(self, raw_obs, ids, policy='e_greedy', eps=0):
"""infer action for a batch of agents
Parameters
----------
raw_obs: tuple(numpy array, numpy array)
raw observation of agents tuple(views, features)
ids: numpy array
ids of agents
policy: str
can be 'e_greedy' or 'greedy'
eps: float
used when policy is eps-greedy
Returns
-------
acts: numpy array of int32
actions for agents
"""
view, feature = raw_obs[0], raw_obs[1]
if policy == 'e_greedy':
eps = eps
elif policy == 'greedy':
eps = 0
n = len(view)
batch_size = min(n, self.infer_batch_size)
if self.num_gpu > 1 and n > batch_size: # infer by multi gpu in parallel
ret = self._infer_multi_gpu(view, feature, ids, eps)
else: # infer by splitting big batch in serial
ret = []
for i in range(0, n, batch_size):
beg, end = i, i + batch_size
ret.append(self.sess.run(self.output_action, feed_dict={
self.input_view: view[beg:end],
self.input_feature: feature[beg:end],
self.eps: eps}))
ret = np.concatenate(ret)
return ret
def _calc_target(self, next_view, next_feature, rewards, terminal):
"""calculate target value"""
n = len(rewards)
if self.use_double:
t_qvalues, qvalues = self.sess.run([self.target_qvalues, self.qvalues],
feed_dict={self.input_view: next_view,
self.input_feature: next_feature})
next_value = t_qvalues[np.arange(n), np.argmax(qvalues, axis=1)]
else:
t_qvalues = self.sess.run(self.target_qvalues, {self.input_view: next_view,
self.input_feature: next_feature})
next_value = np.max(t_qvalues, axis=1)
target = np.where(terminal, rewards, rewards + self.gamma * next_value)
return target
def _add_to_replay_buffer(self, sample_buffer):
"""add samples in sample_buffer to replay buffer"""
n = 0
for episode in sample_buffer.episodes():
v, f, a, r = episode.views, episode.features, episode.actions, episode.rewards
m = len(r)
mask = np.ones((m,))
terminal = np.zeros((m,), dtype=np.bool)
if episode.terminal:
terminal[-1] = True
else:
mask[-1] = 0
self.replay_buf_view.put(v)
self.replay_buf_feature.put(f)
self.replay_buf_action.put(a)
self.replay_buf_reward.put(r)
self.replay_buf_terminal.put(terminal)
self.replay_buf_mask.put(mask)
n += m
self.replay_buf_len = min(self.memory_size, self.replay_buf_len + n)
return n
def train(self, sample_buffer, print_every=1000):
""" add new samples in sample_buffer to replay buffer and train
Parameters
----------
sample_buffer: magent.utility.EpisodesBuffer
buffer contains samples
print_every: int
print log every print_every batches
Returns
-------
loss: float
bellman residual loss
value: float
estimated state value
"""
add_num = self._add_to_replay_buffer(sample_buffer)
batch_size = self.batch_size
total_loss = 0
n_batches = int(self.train_freq * add_num / batch_size)
if n_batches == 0:
return 0, 0
print("batch number: %d add: %d replay_len: %d/%d" %
(n_batches, add_num, self.replay_buf_len, self.memory_size))
start_time = time.time()
ct = 0
for i in range(n_batches):
# fetch a batch
index = np.random.choice(self.replay_buf_len - 1, batch_size)
batch_view = self.replay_buf_view.get(index)
batch_feature = self.replay_buf_feature.get(index)
batch_action = self.replay_buf_action.get(index)
batch_reward = self.replay_buf_reward.get(index)
batch_terminal = self.replay_buf_terminal.get(index)
batch_mask = self.replay_buf_mask.get(index)
batch_next_view = self.replay_buf_view.get(index+1)
batch_next_feature = self.replay_buf_feature.get(index+1)
batch_target = self._calc_target(batch_next_view, batch_next_feature,
batch_reward, batch_terminal)
ret = self.sess.run([self.train_op, self.loss], feed_dict={
self.input_view: batch_view,
self.input_feature: batch_feature,
self.action: batch_action,
self.target: batch_target,
self.mask: batch_mask
})
loss = ret[1]
total_loss += loss
if ct % self.target_update == 0:
self.sess.run(self.update_target_op)
if ct % print_every == 0:
print("batch %5d, loss %.6f, eval %.6f" % (ct, loss, self._eval(batch_target)))
ct += 1
self.train_ct += 1
total_time = time.time() - start_time
step_average = total_time / max(1.0, (ct / 1000.0))
print("batches: %d, total time: %.2f, 1k average: %.2f" % (ct, total_time, step_average))
return total_loss / ct if ct != 0 else 0, self._eval(batch_target)
def _eval(self, target):
"""evaluate estimated q value"""
if self.eval_obs is None:
return np.mean(target)
else:
return np.mean(self.sess.run([self.qvalues], feed_dict={
self.input_view: self.eval_obs[0],
self.input_feature: self.eval_obs[1]
}))
def clear_buffer(self):
"""clear replay buffer"""
self.replay_buf_len = 0
self.replay_buf_view.clear()
self.replay_buf_feature.clear()
self.replay_buf_action.clear()
self.replay_buf_reward.clear()
self.replay_buf_terminal.clear()
self.replay_buf_mask.clear()
def _build_multi_gpu_infer(self, num_gpu):
"""build inference graph for multi gpus"""
self.infer_qvalues = []
self.infer_input_view = []
self.infer_input_feature = []
for i in range(num_gpu):
self.infer_input_view.append(tf.placeholder(tf.float32, (None,) + self.view_space))
self.infer_input_feature.append(tf.placeholder(tf.float32, (None,) + self.feature_space))
with tf.variable_scope("eval_net_scope"), tf.device("/gpu:%d" % i):
self.infer_qvalues.append(self._create_network(self.infer_input_view[i],
self.infer_input_feature[i], reuse=True))
def _infer_multi_gpu(self, view, feature, ids, eps):
"""infer action by multi gpu in parallel """
ret = []
beg = 0
while beg < len(view):
feed_dict = {self.eps: eps}
for i in range(self.num_gpu):
end = beg + self.infer_batch_size
feed_dict[self.infer_input_view[i]] = view[beg:end]
feed_dict[self.infer_input_feature[i]] = feature[beg:end]
beg += self.infer_batch_size
ret.extend(self.sess.run(self.infer_out_action, feed_dict=feed_dict))
return np.concatenate(ret)
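
# Plain-numpy restatements (hypothetical helpers, not used by the class above)
# of the two value tricks used by DeepQNetwork:
# (1) dueling aggregation, Q(s, a) = V(s) + A(s, a) - mean_a A(s, a), as built
#     in _create_network when use_dueling is True;
# (2) the double-DQN target of _calc_target: the online network picks the
#     greedy next action, the target network evaluates it.
def _dueling_q_numpy(value, advantage):
    """value: (n, 1); advantage: (n, num_actions)"""
    return value + advantage - advantage.mean(axis=1, keepdims=True)


def _double_dqn_target_numpy(rewards, terminal, q_online_next, q_target_next, gamma):
    import numpy as np

    n = len(rewards)
    greedy_actions = np.argmax(q_online_next, axis=1)          # chosen by the online net
    next_value = q_target_next[np.arange(n), greedy_actions]   # scored by the target net
    return np.where(terminal, rewards, rewards + gamma * next_value)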

# ==== MAgent-master/python/magent/builtin/tf_model/__init__.py ====
from .dqn import DeepQNetwork
from .drqn import DeepRecurrentQNetwork
from .a2c import AdvantageActorCritic

# ==== MAgent-master/python/magent/builtin/config/double_attack.py ====
"""
A cooperation game: tigers must attack the same deer simultaneously to get a reward
"""
import magent
def get_config(map_size):
gw = magent.gridworld
cfg = gw.Config()
cfg.set({"map_width": map_size, "map_height": map_size})
cfg.set({"embedding_size": 10})
deer = cfg.register_agent_type(
"deer",
{'width': 1, 'length': 1, 'hp': 5, 'speed': 1,
'view_range': gw.CircleRange(1), 'attack_range': gw.CircleRange(0),
'step_recover': 0.2,
'kill_supply': 8,
})
tiger = cfg.register_agent_type(
"tiger",
{'width': 1, 'length': 1, 'hp': 10, 'speed': 1,
'view_range': gw.CircleRange(4), 'attack_range': gw.CircleRange(1),
'damage': 1, 'step_recover': -0.2,
})
deer_group = cfg.add_group(deer)
tiger_group = cfg.add_group(tiger)
a = gw.AgentSymbol(tiger_group, index='any')
b = gw.AgentSymbol(tiger_group, index='any')
c = gw.AgentSymbol(deer_group, index='any')
# tigers get a reward when they attack the same deer simultaneously
e1 = gw.Event(a, 'attack', c)
e2 = gw.Event(b, 'attack', c)
cfg.add_reward_rule(e1 & e2, receiver=[a, b], value=[1, 1])
return cfg
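
# A plain-python sketch of the cooperative reward the rule above is meant to
# express: a tiger is rewarded only when at least one other tiger attacks the
# same deer in the same step. The (tiger_id, deer_id) event tuples and the
# helper itself are hypothetical -- inside MAgent the engine performs this
# matching via cfg.add_reward_rule(e1 & e2, ...).
def _simultaneous_attack_rewards(attack_events, bonus=1.0):
    """attack_events: iterable of (tiger_id, deer_id) pairs for one step"""
    from collections import defaultdict

    attackers_by_deer = defaultdict(list)
    for tiger_id, deer_id in attack_events:
        attackers_by_deer[deer_id].append(tiger_id)

    rewards = defaultdict(float)
    for tigers in attackers_by_deer.values():
        if len(tigers) >= 2:
            for tiger_id in tigers:
                rewards[tiger_id] += bonus
    return dict(rewards)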

# ==== MAgent-master/python/magent/builtin/config/battle.py ====
""" battle of two armies """
import magent
def get_config(map_size):
gw = magent.gridworld
cfg = gw.Config()
cfg.set({"map_width": map_size, "map_height": map_size})
cfg.set({"minimap_mode": True})
cfg.set({"embedding_size": 10})
small = cfg.register_agent_type(
"small",
{'width': 1, 'length': 1, 'hp': 10, 'speed': 2,
'view_range': gw.CircleRange(6), 'attack_range': gw.CircleRange(1.5),
'damage': 2, 'step_recover': 0.1,
'step_reward': -0.005, 'kill_reward': 5, 'dead_penalty': -0.1, 'attack_penalty': -0.1,
})
g0 = cfg.add_group(small)
g1 = cfg.add_group(small)
a = gw.AgentSymbol(g0, index='any')
b = gw.AgentSymbol(g1, index='any')
# reward shaping to encourage attack
cfg.add_reward_rule(gw.Event(a, 'attack', b), receiver=a, value=0.2)
cfg.add_reward_rule(gw.Event(b, 'attack', a), receiver=b, value=0.2)
return cfg
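
# A hedged usage sketch of how a config like this is typically consumed. The
# driver calls below follow the MAgent example scripts and are assumptions
# here (the compiled MAgent library must be available), so they are left as
# comments rather than executable code:
#
#   import magent
#   from magent.builtin.tf_model import DeepQNetwork
#
#   env = magent.GridWorld(get_config(map_size=125))
#   handles = env.get_handles()                # one handle per group (g0, g1)
#   model = DeepQNetwork(env, handles[0], "battle-left")
#
#   env.reset()
#   obs = env.get_observation(handles[0])      # tuple(views, features)
#   ids = env.get_agent_id(handles[0])
#   acts = model.infer_action(obs, ids, policy="e_greedy", eps=0.1)
#   env.set_action(handles[0], acts)
#   done = env.step()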

# ==== MAgent-master/python/magent/builtin/config/pursuit.py ====
import magent
def get_config(map_size):
gw = magent.gridworld
cfg = gw.Config()
cfg.set({"map_width": map_size, "map_height": map_size})
predator = cfg.register_agent_type(
"predator",
{
'width': 2, 'length': 2, 'hp': 1, 'speed': 1,
'view_range': gw.CircleRange(5), 'attack_range': gw.CircleRange(2),
'attack_penalty': -0.2
})
prey = cfg.register_agent_type(
"prey",
{
'width': 1, 'length': 1, 'hp': 1, 'speed': 1.5,
'view_range': gw.CircleRange(4), 'attack_range': gw.CircleRange(0)
})
predator_group = cfg.add_group(predator)
prey_group = cfg.add_group(prey)
a = gw.AgentSymbol(predator_group, index='any')
b = gw.AgentSymbol(prey_group, index='any')
cfg.add_reward_rule(gw.Event(a, 'attack', b), receiver=[a, b], value=[1, -1])
return cfg

# ==== MAgent-master/python/magent/builtin/config/__init__.py (empty file) ====

# ==== MAgent-master/python/magent/builtin/config/forest.py ====
""" tigers eat deer to gain health points and reward """
import magent
def get_config(map_size):
gw = magent.gridworld
cfg = gw.Config()
cfg.set({"map_width": map_size, "map_height": map_size})
cfg.set({"embedding_size": 10})
deer = cfg.register_agent_type(
"deer",
{'width': 1, 'length': 1, 'hp': 5, 'speed': 1,
'view_range': gw.CircleRange(1), 'attack_range': gw.CircleRange(0),
'damage': 0, 'step_recover': 0.2,
'food_supply': 0, 'kill_supply': 8,
})
tiger = cfg.register_agent_type(
"tiger",
{'width': 1, 'length': 1, 'hp': 10, 'speed': 1,
'view_range': gw.CircleRange(4), 'attack_range': gw.CircleRange(1),
'damage': 3, 'step_recover': -0.5,
'food_supply': 0, 'kill_supply': 0,
'step_reward': 1, 'attack_penalty': -0.1,
})
deer_group = cfg.add_group(deer)
tiger_group = cfg.add_group(tiger)
return cfg

# ==== MAgent-master/python/magent/builtin/mx_model/base.py ====
import os
import mxnet as mx
from magent.utility import has_gpu
from magent.model import BaseModel
class MXBaseModel(BaseModel):
def __init__(self, env, handle, name, subclass_name):
"""init a model
Parameters
----------
env: magent.Environment
handle: handle (ctypes.c_int32)
name: str
subclass_name: str
name of subclass
"""
BaseModel.__init__(self, env, handle)
self.name = name
self.subclass_name = subclass_name
def _get_ctx(self):
"""return correct context , priority: gpu > cpu
Returns
-------
ctx: mx.context
"""
if has_gpu():
return mx.gpu()
else:
return mx.cpu()
def save(self, dir_name, epoch):
"""save model to dir
Parameters
----------
dir_name: str
name of the directory
epoch: int
"""
if not os.path.exists(dir_name):
os.mkdir(dir_name)
dir_name = os.path.join(dir_name, self.name, )
if not os.path.exists(dir_name):
os.mkdir(dir_name)
pre = os.path.join(dir_name, self.subclass_name)
self.model.save_checkpoint(pre, epoch, save_optimizer_states=True)
def load(self, dir_name, epoch=0, name=None):
"""save model to dir
Parameters
----------
dir_name: str
name of the directory
epoch: int
"""
name = name or self.name
dir_name = os.path.join(dir_name, name)
pre = os.path.join(dir_name, self.subclass_name)
_, arg_params, aux_params = mx.model.load_checkpoint(pre, epoch)
self.model.set_params(arg_params, aux_params, force_init=True)
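
# A minimal, self-contained sketch (assuming MXNet 1.x, as above; the prefix
# and network are hypothetical) of the Module checkpoint cycle that save() and
# load() rely on: save_checkpoint() writes "<prefix>-symbol.json" plus an
# epoch-numbered ".params" file, and mx.model.load_checkpoint() reads the
# parameters back for set_params().
def _demo_module_checkpoint():
    import tempfile

    import mxnet as mx

    prefix = tempfile.mkdtemp() + "/demo"

    data = mx.sym.var("data")
    net = mx.sym.FullyConnected(data=data, num_hidden=4)

    mod = mx.mod.Module(net, data_names=["data"], label_names=[], context=mx.cpu())
    mod.bind(data_shapes=[("data", (1, 8))])
    mod.init_params(initializer=mx.init.Xavier())
    mod.save_checkpoint(prefix, 0)

    _, arg_params, aux_params = mx.model.load_checkpoint(prefix, 0)
    mod.set_params(arg_params, aux_params, force_init=True)
    return sorted(arg_params.keys())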

# ==== MAgent-master/python/magent/builtin/mx_model/a2c.py ====
"""advantage actor critic"""
import os
import time
import numpy as np
import mxnet as mx
from .base import MXBaseModel
class AdvantageActorCritic(MXBaseModel):
def __init__(self, env, handle, name, eval_obs=None,
batch_size=64, reward_decay=0.99, learning_rate=1e-3,
train_freq=1, value_coef=0.1, ent_coef=0.1,
custom_view_space=None, custom_feature_space=None,
*args, **kwargs):
"""init a model
Parameters
----------
env: Environment
environment
handle: Handle (ctypes.c_int32)
handle of this group, can be got by env.get_handles
name: str
name of this model
learning_rate: float
batch_size: int
reward_decay: float
reward_decay in TD
eval_obs: numpy array
evaluation set of observation
train_freq: int
mean training times of a sample
ent_coef: float
weight of entropy loss in total loss
value_coef: float
weight of value loss in total loss
custom_feature_space: tuple
customized feature space
custom_view_space: tuple
customized view space
"""
MXBaseModel.__init__(self, env, handle, name, "mxa2c")
# ======================== set config ========================
self.env = env
self.handle = handle
self.view_space = custom_view_space or env.get_view_space(handle)
self.feature_space = custom_feature_space or env.get_feature_space(handle)
self.num_actions = env.get_action_space(handle)[0]
self.reward_decay = reward_decay
self.batch_size = batch_size
self.learning_rate= learning_rate
self.train_freq = train_freq # train time of every sample (s,a,r,s')
self.eval_obs = eval_obs
self.value_coef = value_coef
self.ent_coef = ent_coef
self.train_ct = 0
# ======================= build network =======================
self.ctx = self._get_ctx()
self.input_view = mx.sym.var("input_view")
self.input_feature = mx.sym.var("input_feature")
policy, value = self._create_network(self.input_view, self.input_feature)
log_policy = mx.sym.log(policy)
out_policy = mx.sym.BlockGrad(policy)
neg_entropy = ent_coef * mx.sym.sum(policy * log_policy, axis=1)
neg_entropy = mx.sym.MakeLoss(data=neg_entropy)
self.sym = mx.sym.Group([log_policy, value, neg_entropy, out_policy])
self.model = mx.mod.Module(self.sym, data_names=['input_view', 'input_feature'],
label_names=None, context=self.ctx)
# bind (set initial batch size)
self.bind_size = batch_size
self.model.bind(data_shapes=[('input_view', (batch_size,) + self.view_space),
('input_feature', (batch_size,) + self.feature_space)],
label_shapes=None)
# init params
self.model.init_params(initializer=mx.init.Xavier())
self.model.init_optimizer(optimizer='adam', optimizer_params={
'learning_rate': learning_rate,
'clip_gradient': 10,
})
# init training buffers
self.view_buf = np.empty((1,) + self.view_space)
self.feature_buf = np.empty((1,) + self.feature_space)
self.action_buf = np.empty(1, dtype=np.int32)
self.advantage_buf, self.value_buf = np.empty(1), np.empty(1)
self.terminal_buf = np.empty(1, dtype=np.bool)
# print("parameters", self.model.get_params())
# mx.viz.plot_network(self.output).view()
def _create_network(self, input_view, input_feature):
"""define computation graph of network
Parameters
----------
view_space: tuple
feature_space: tuple
the input shape
"""
kernel_num = [32, 32]
hidden_size = [256]
if False:
h_conv1 = mx.sym.Convolution(data=input_view, kernel=(3, 3),
num_filter=kernel_num[0], layout="NHWC")
h_conv1 = mx.sym.Activation(data=h_conv1, act_type="relu")
h_conv2 = mx.sym.Convolution(data=h_conv1, kernel=(3, 3),
num_filter=kernel_num[1], layout="NHWC")
h_conv2 = mx.sym.Activation(data=h_conv2, act_type="relu")
else:
input_view = mx.sym.flatten(data=input_view)
h_conv2 = mx.sym.FullyConnected(input_view, num_hidden=hidden_size[0])
h_conv2 = mx.sym.Activation(data=h_conv2, act_type="relu")
flatten_view = mx.sym.flatten(data=h_conv2)
h_view = mx.sym.FullyConnected(data=flatten_view, num_hidden=hidden_size[0])
h_view = mx.sym.Activation(data=h_view, act_type="relu")
h_emb = mx.sym.FullyConnected(data=input_feature, num_hidden=hidden_size[0])
h_emb = mx.sym.Activation(data=h_emb, act_type="relu")
dense = h_view + h_emb
policy = mx.sym.FullyConnected(data=dense, num_hidden=self.num_actions, no_bias=True)
policy = mx.sym.SoftmaxActivation(data=policy)
policy = mx.sym.clip(data=policy, a_min=1e-5, a_max=1 - 1e-5)
value = mx.sym.FullyConnected(data=dense, num_hidden=1)
return policy, value
def infer_action(self, raw_obs, ids, policy="e_greedy", eps=0):
"""infer action for a batch of agents
Parameters
----------
raw_obs: tuple(numpy array, numpy array)
raw observation of agents tuple(views, features)
ids: numpy array
ids of agents
Returns
-------
acts: numpy array of int32
actions for agents
"""
view, feature = raw_obs[0], raw_obs[1]
n = len(view)
ret = np.empty(n, dtype=np.int32)
self._reset_bind_size(n)
data_batch = mx.io.DataBatch(data=[mx.nd.array(view), mx.nd.array(feature)])
self.model.forward(data_batch, is_train=False)
policy = self.model.get_outputs()[3].asnumpy()
actions = np.arange(self.num_actions)
for i in range(n):
ret[i] = np.random.choice(actions, p=policy[i])
return ret
def train(self, sample_buffer, print_every=1000):
"""feed new data sample and train
Parameters
----------
sample_buffer: magent.utility.EpisodesBuffer
buffer contains samples
Returns
-------
loss: list
policy gradient loss, critic loss, entropy loss
value: float
estimated state value
"""
# calc buffer size
n = 0
for episode in sample_buffer.episodes():
if episode.terminal:
n += len(episode.rewards)
else:
n += len(episode.rewards) - 1
if n == 0:
return [0.0, 0.0, 0.0], 0.0
# resize to the new size
self.view_buf.resize((n,) + self.view_space)
self.feature_buf.resize((n,) + self.feature_space)
self.action_buf.resize(n)
self.value_buf.resize(n)
self.advantage_buf.resize(n)
view, feature = self.view_buf, self.feature_buf
action, value = self.action_buf, self.value_buf
advantage = self.advantage_buf
ct = 0
gamma = self.reward_decay
# collect episodes from multiple separate buffers to a continuous buffer
for episode in sample_buffer.episodes():
v, f, a, r = episode.views, episode.features, episode.actions, episode.rewards
m = len(episode.rewards)
self._reset_bind_size(m)
data_batch = mx.io.DataBatch(data=[mx.nd.array(v), mx.nd.array(f)])
self.model.forward(data_batch, is_train=False)
value = self.model.get_outputs()[1].asnumpy().flatten()
delta_t = np.empty(m)
if episode.terminal:
delta_t[:m-1] = r[:m-1] + gamma * value[1:m] - value[:m-1]
delta_t[m-1] = r[m-1] + gamma * 0 - value[m-1]
else:
delta_t[:m-1] = r[:m-1] + gamma * value[1:m] - value[:m-1]
m -= 1
v, f, a = v[:-1], f[:-1], a[:-1]
if m == 0:
continue
# discount advantage
keep = 0
for i in reversed(range(m)):
keep = keep * gamma + delta_t[i]
advantage[ct+i] = keep
view[ct:ct+m] = v
feature[ct:ct+m] = f
action[ct:ct+m] = a
ct += m
assert n == ct
n = len(advantage)
neg_advantage = -advantage
neg_advs_np = np.zeros((n, self.num_actions), dtype=np.float32)
neg_advs_np[np.arange(n), action] = neg_advantage
neg_advs = mx.nd.array(neg_advs_np)
# the grads of values are exactly negative advantages
v_grads = mx.nd.array(self.value_coef * (neg_advantage[:, np.newaxis]))
data_batch = mx.io.DataBatch(data=[mx.nd.array(view), mx.nd.array(feature)])
self._reset_bind_size(n)
self.model.forward(data_batch, is_train=True)
self.model.backward(out_grads=[neg_advs, v_grads])
self.model.update()
log_policy, value, entropy_loss, _ = self.model.get_outputs()
value = mx.nd.mean(value).asnumpy()[0]
log_policy = log_policy.asnumpy()[np.arange(n), action]
pg_loss = np.mean(neg_advantage * log_policy)
entropy_loss = np.mean(entropy_loss.asnumpy())
value_loss = self.value_coef * np.mean(np.square(advantage))
print("sample %d %.4f %.4f %.4f %.4f" % (n, pg_loss, value_loss, entropy_loss, value))
return [pg_loss, value_loss, entropy_loss], value
def _reset_bind_size(self, new_size):
"""reset input shape of the model
Parameters
----------
new_size: int
new batch size
"""
if self.bind_size == new_size:
return
else:
self.bind_size = new_size
self.model.reshape(
data_shapes=[
('input_view', (new_size,) + self.view_space),
('input_feature', (new_size,) + self.feature_space)],
)
def get_info(self):
"""get information of the model
Returns
-------
info: string
"""
return "mx dqn train_time: %d" % (self.train_ct)

# ==== MAgent-master/python/magent/builtin/mx_model/dqn.py ====
import time
import numpy as np
import mxnet as mx
from .base import MXBaseModel
from ..common import ReplayBuffer
from ...utility import has_gpu
class DeepQNetwork(MXBaseModel):
def __init__(self, env, handle, name,
batch_size=64, learning_rate=1e-4, reward_decay=0.99,
train_freq=1, target_update=2000, memory_size=2 ** 20, eval_obs=None,
use_dueling=True, use_double=True, infer_batch_size=8192,
custom_view_space=None, custom_feature_space=None, num_gpu=1):
"""init a model
Parameters
----------
env: Environment
environment
handle: Handle (ctypes.c_int32)
handle of this group, can be got by env.get_handles
name: str
name of this model
learning_rate: float
batch_size: int
reward_decay: float
reward_decay in TD
train_freq: int
mean training times of a sample
target_update: int
target will update every target_update batches
memory_size: int
capacity of the replay buffer
eval_obs: numpy array
evaluation set of observation
use_dueling: bool
whether use dueling q network
use_double: bool
whether use double q network
num_gpu: int
number of gpu
infer_batch_size: int
batch size while inferring actions
custom_feature_space: tuple
customized feature space
custom_view_space: tuple
customized view space
"""
MXBaseModel.__init__(self, env, handle, name, "mxdqn")
# ======================== set config ========================
self.env = env
self.handle = handle
self.view_space = custom_view_space or env.get_view_space(handle)
self.feature_space = custom_feature_space or env.get_feature_space(handle)
self.num_actions = env.get_action_space(handle)[0]
self.batch_size = batch_size
self.infer_batch_size = infer_batch_size
self.learning_rate = learning_rate
self.train_freq = train_freq # train time of every sample (s,a,r,s')
self.target_update = target_update # update frequency of target network
self.eval_obs = eval_obs
self.num_gpu = num_gpu
self.use_dueling = use_dueling
self.use_double = use_double
self.train_ct = 0
# ======================= build network =======================
self.ctx = self._get_ctx()
if self.num_gpu > 1 and self.ctx == mx.gpu():
self.ctx = []
for i in range(self.num_gpu):
self.ctx.append(mx.gpu(i))
self.input_view = mx.sym.var("input_view")
self.input_feature = mx.sym.var("input_feature")
self.mask = mx.sym.var("mask")
self.action = mx.sym.var("action")
self.target = mx.sym.var("target")
self.qvalues = self._create_network(self.input_view, self.input_feature)
self.gamma = reward_decay
self.action_onehot = mx.sym.one_hot(self.action, depth=self.num_actions)
td_error = mx.sym.square(self.target - mx.sym.sum(self.qvalues * self.action_onehot, axis=1))
self.loss = mx.sym.sum(td_error * self.mask) / mx.sym.sum(self.mask)
self.loss = mx.sym.MakeLoss(data=self.loss)
self.out_qvalues = mx.sym.BlockGrad(self.qvalues)
self.output = mx.sym.Group([self.out_qvalues, self.loss])
self.model = mx.mod.Module(self.output,
data_names=['input_view', 'input_feature'],
label_names=['action', 'target', 'mask'], context=self.ctx)
self.target_model = mx.mod.Module(self.qvalues,
data_names=['input_view', 'input_feature'],
label_names=[], context=self.ctx)
# bind (set initial batch size)
self.bind_size = batch_size
self.model.bind(data_shapes=[('input_view', (batch_size,) + self.view_space),
('input_feature', (batch_size,) + self.feature_space)],
label_shapes=[('action', (batch_size,)),
('target', (batch_size,)),
('mask', (batch_size,))])
self.target_model.bind(data_shapes=[('input_view', (batch_size,) + self.view_space),
('input_feature', (batch_size,) + self.feature_space)])
# init params
self.model.init_params(initializer=mx.init.Xavier())
self.model.init_optimizer(optimizer='adam', optimizer_params={
'learning_rate': learning_rate,
'clip_gradient': 10.0})
self._copy_network(self.target_model, self.model)
# init replay buffers
self.replay_buf_len = 0
self.memory_size = memory_size
self.replay_buf_view = ReplayBuffer(shape=(memory_size,) + self.view_space)
self.replay_buf_feature = ReplayBuffer(shape=(memory_size,) + self.feature_space)
self.replay_buf_action = ReplayBuffer(shape=(memory_size,), dtype=np.int32)
self.replay_buf_reward = ReplayBuffer(shape=(memory_size,))
self.replay_buf_terminal = ReplayBuffer(shape=(memory_size,), dtype=np.bool)
self.replay_buf_mask = ReplayBuffer(shape=(memory_size,))
# if mask[i] == 0, then the item is used for padding, not for training
# print("parameters", self.model.get_params())
# mx.viz.plot_network(self.loss).view()
def _create_network(self, input_view, input_feature, use_conv=True):
"""define computation graph of network
Parameters
----------
input_view: mx.symbol
input_feature: mx.symbol
the input tensor
"""
kernel_num = [32, 32]
hidden_size = [256]
if use_conv:
input_view = mx.sym.transpose(data=input_view, axes=[0, 3, 1, 2])
h_conv1 = mx.sym.Convolution(data=input_view, kernel=(3, 3),
num_filter=kernel_num[0], layout="NCHW")
h_conv1 = mx.sym.Activation(data=h_conv1, act_type="relu")
h_conv2 = mx.sym.Convolution(data=h_conv1, kernel=(3, 3),
num_filter=kernel_num[1], layout="NCHW")
h_conv2 = mx.sym.Activation(data=h_conv2, act_type="relu")
else:
input_view = mx.sym.flatten(data=input_view)
h_conv2 = mx.sym.FullyConnected(input_view, num_hidden=hidden_size[0])
h_conv2 = mx.sym.Activation(data=h_conv2, act_type="relu")
flatten_view = mx.sym.flatten(data=h_conv2)
h_view = mx.sym.FullyConnected(data=flatten_view, num_hidden=hidden_size[0])
h_view = mx.sym.Activation(data=h_view, act_type="relu")
h_emb = mx.sym.FullyConnected(data=input_feature, num_hidden=hidden_size[0])
h_emb = mx.sym.Activation(data=h_emb, act_type="relu")
dense = mx.sym.concat(h_view, h_emb)
if self.use_dueling:
# state value
value = mx.sym.FullyConnected(data=dense, num_hidden=1)
advantage = mx.sym.FullyConnected(data=dense, num_hidden=self.num_actions)
mean = mx.sym.mean(advantage, axis=1, keepdims=True)
advantage = mx.sym.broadcast_sub(advantage, mean)
qvalues = mx.sym.broadcast_add(advantage, value)
else:
qvalues = mx.sym.FullyConnected(data=dense, num_hidden=self.num_actions)
return qvalues
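        # Dueling decomposition used above (when use_dueling is True):
        #   Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a))
        # Subtracting the per-state mean keeps V and A identifiable. A minimal NumPy sketch
        # with hypothetical arrays value (n, 1) and adv (n, num_actions):
        #   q = value + (adv - adv.mean(axis=1, keepdims=True))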
def infer_action(self, raw_obs, ids, policy="e_greedy", eps=0):
"""infer action for a batch of agents
Parameters
----------
raw_obs: tuple(numpy array, numpy array)
raw observation of agents tuple(views, features)
ids: numpy array
ids of agents
policy: str
can be eps-greedy or greedy
eps: float
used when policy is eps-greedy
Returns
-------
acts: numpy array of int32
actions for agents
"""
view, feature = raw_obs[0], raw_obs[1]
if policy == 'e_greedy':
eps = eps
elif policy == 'greedy':
eps = 0
n = len(view)
if n < self.num_gpu:
view = np.tile(view, (self.num_gpu, 1, 1, 1))
feature = np.tile(feature, (self.num_gpu, 1))
batch_size = min(len(view), self.infer_batch_size)
self._reset_bind_size(batch_size)
best_actions = []
infer_iter = mx.io.NDArrayIter(data=[view, feature], batch_size=batch_size)
for batch in infer_iter:
self.model.forward(batch, is_train=False)
qvalue_batch = self.model.get_outputs()[0]
batch_action = mx.nd.argmax(qvalue_batch, axis=1)
best_actions.append(batch_action)
best_actions = np.array([x.asnumpy() for x in best_actions]).flatten()
best_actions = best_actions[:n]
random = np.random.randint(self.num_actions, size=(n,))
cond = np.random.uniform(0, 1, size=(n,)) < eps
ret = np.where(cond, random, best_actions)
return ret.astype(np.int32)
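        # Hypothetical usage sketch (handle/model variable names are illustrative):
        #   obs = env.get_observation(handle)
        #   ids = env.get_agent_id(handle)
        #   acts = model.infer_action(obs, ids, policy="e_greedy", eps=0.1)
        #   env.set_action(handle, acts)
        # With probability eps a uniformly random action is chosen, otherwise argmax_a Q(s, a).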
def _calc_target(self, next_view, next_feature, rewards, terminal):
"""calculate target value"""
n = len(rewards)
data_batch = mx.io.DataBatch(data=[mx.nd.array(next_view), mx.nd.array(next_feature)])
self._reset_bind_size(n)
if self.use_double:
self.target_model.forward(data_batch, is_train=False)
self.model.forward(data_batch, is_train=False)
t_qvalues = self.target_model.get_outputs()[0].asnumpy()
qvalues = self.model.get_outputs()[0].asnumpy()
next_value = t_qvalues[np.arange(n), np.argmax(qvalues, axis=1)]
else:
self.target_model.forward(data_batch, is_train=False)
t_qvalues = self.target_model.get_outputs()[0].asnumpy()
next_value = np.max(t_qvalues, axis=1)
target = np.where(terminal, rewards, rewards + self.gamma * next_value)
return target
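        # Target used above, written out (terminal transitions keep only the reward):
        #   vanilla DQN: y = r + gamma * max_a' Q_target(s', a')
        #   double DQN : y = r + gamma * Q_target(s', argmax_a' Q_online(s', a'))
        # Double DQN selects the action with the online network but evaluates it with the
        # target network, which reduces the over-estimation bias of the max operator.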
def _add_to_replay_buffer(self, sample_buffer):
"""add samples in sample_buffer to replay buffer"""
n = 0
for episode in sample_buffer.episodes():
v, f, a, r = episode.views, episode.features, episode.actions, episode.rewards
m = len(r)
mask = np.ones((m,))
terminal = np.zeros((m,), dtype=np.bool)
if episode.terminal:
terminal[-1] = True
else:
mask[-1] = 0
self.replay_buf_view.put(v)
self.replay_buf_feature.put(f)
self.replay_buf_action.put(a)
self.replay_buf_reward.put(r)
self.replay_buf_terminal.put(terminal)
self.replay_buf_mask.put(mask)
n += m
self.replay_buf_len = min(self.memory_size, self.replay_buf_len + n)
return n
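        # Bookkeeping above: a finished episode marks its last transition terminal (target
        # is the reward alone); a truncated episode sets mask = 0 on its last item, so that
        # item only serves as the "next state" of its predecessor and is excluded from the
        # masked TD loss defined in __init__.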
def train(self, sample_buffer, print_every=1000):
""" add new samples in sample_buffer to replay buffer and train
Parameters
----------
sample_buffer: magent.utility.EpisodesBuffer
buffer contains samples
print_every: int
print log every print_every batches
Returns
-------
loss: float
bellman residual loss
value: float
estimated state value
"""
add_num = self._add_to_replay_buffer(sample_buffer)
batch_size = self.batch_size
total_loss = 0
n_batches = int(self.train_freq * add_num / batch_size)
if n_batches == 0:
return 0, 0
print("batch number: %d add: %d replay_len: %d/%d" %
(n_batches, add_num, self.replay_buf_len, self.memory_size))
start_time = time.time()
ct = 0
for i in range(n_batches):
# fetch a batch
index = np.random.choice(self.replay_buf_len - 1, batch_size)
batch_view = self.replay_buf_view.get(index)
batch_feature = self.replay_buf_feature.get(index)
batch_action = self.replay_buf_action.get(index)
batch_reward = self.replay_buf_reward.get(index)
batch_terminal = self.replay_buf_terminal.get(index)
batch_mask = self.replay_buf_mask.get(index)
batch_next_view = self.replay_buf_view.get(index+1)
batch_next_feature = self.replay_buf_feature.get(index+1)
batch_target = self._calc_target(batch_next_view, batch_next_feature,
batch_reward, batch_terminal)
self._reset_bind_size(batch_size)
batch = mx.io.DataBatch(data=[mx.nd.array(batch_view),
mx.nd.array(batch_feature)],
label=[mx.nd.array(batch_action),
mx.nd.array(batch_target),
mx.nd.array(batch_mask)])
self.model.forward(batch, is_train=True)
self.model.backward()
self.model.update()
loss = np.mean(self.model.get_outputs()[1].asnumpy())
total_loss += loss
if ct % self.target_update == 0:
self._copy_network(self.target_model, self.model)
if ct % print_every == 0:
print("batch %5d, loss %.6f, eval %.6f" % (ct, loss, self._eval(batch_target)))
ct += 1
self.train_ct += 1
total_time = time.time() - start_time
step_average = total_time / max(1.0, (ct / 1000.0))
print("batches: %d, total time: %.2f, 1k average: %.2f" % (ct, total_time, step_average))
return total_loss / ct if ct != 0 else 0, self._eval(batch_target)
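        # Each batch above follows the usual DQN recipe: sample (s, a, r, s') uniformly from
        # the replay buffer, build targets with the frozen target network, take one Adam step
        # on the masked TD loss, and sync the target network every `target_update` batches.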
def _reset_bind_size(self, new_size):
"""reset batch size"""
if self.bind_size == new_size:
return
else:
self.bind_size = new_size
def _reshape(model, is_target):
data_shapes = [('input_view', (new_size,) + self.view_space),
('input_feature', (new_size,) + self.feature_space)]
label_shapes = [('action', (new_size,)),
('target', (new_size,)),
('mask', (new_size,))]
if is_target:
label_shapes = None
model.reshape(data_shapes=data_shapes, label_shapes=label_shapes)
_reshape(self.model, False)
_reshape(self.target_model, True)
def _copy_network(self, dest, source):
"""copy to target network"""
arg_params, aux_params = source.get_params()
dest.set_params(arg_params, aux_params)
def _eval(self, target):
"""evaluate estimated q value"""
if self.eval_obs is None:
return np.mean(target)
else:
self._reset_bind_size(len(self.eval_obs[0]))
with self.ctx:
batch = mx.io.DataBatch(data=[mx.nd.array(self.eval_obs[0]),
mx.nd.array(self.eval_obs[1])])
self.model.forward(batch, is_train=False)
return np.mean(self.model.get_outputs()[0].asnumpy())
def get_info(self):
return "mx dqn train_time: %d" % (self.train_ct) | 15,724 | 39.424165 | 101 | py |
MAgent | MAgent-master/python/magent/builtin/mx_model/__init__.py | from .dqn import DeepQNetwork
from .a2c import AdvantageActorCritic
| 68 | 22 | 37 | py |
MAgent | MAgent-master/python/magent/builtin/rule_model/rush.py | """deprecated"""
import ctypes
import numpy as np
from magent.model import BaseModel
from magent.c_lib import _LIB, as_int32_c_array, as_float_c_array
class RushPredator(BaseModel):
def __init__(self, env, handle, attack_handle, *args, **kwargs):
BaseModel.__init__(self, env, handle)
self.attack_channel = env.get_channel(attack_handle)
self.attack_base, self.view2attack = env.get_view2attack(handle)
print("attack_channel", self.attack_channel)
print("view2attack", self.view2attack)
def infer_action(self, observations, *args, **kwargs):
obs_buf = as_float_c_array(observations[0])
hp_buf = as_float_c_array(observations[1])
n, height, width, n_channel = observations[0].shape
buf = np.empty((n,), dtype=np.int32)
act_buf = as_int32_c_array(buf)
attack_channel = self.attack_channel
attack_base = self.attack_base
view2attack_buf = as_int32_c_array(self.view2attack)
_LIB.rush_prey_infer_action(obs_buf, hp_buf, n, height, width, n_channel,
act_buf, attack_channel, attack_base,
view2attack_buf, ctypes.c_float(100.0))
return buf
| 1,243 | 35.588235 | 81 | py |
MAgent | MAgent-master/python/magent/builtin/rule_model/rushgather.py | """gather agent, rush to food according to minimap"""
import numpy as np
from magent.model import BaseModel
from magent.c_lib import _LIB, as_int32_c_array, as_float_c_array
class RushGatherer(BaseModel):
def __init__(self, env, handle, *args, **kwargs):
BaseModel.__init__(self, env, handle)
self.env = env
self.handle = handle
self.n_action = env.get_action_space(handle)
self.view_size = env.get_view_space(handle)
self.attack_base, self.view2attack = env.get_view2attack(handle)
def infer_action(self, states, *args, **kwargs):
obs_buf = as_float_c_array(states[0])
hp_buf = as_float_c_array(states[1])
n, height, width, n_channel = states[0].shape
buf = np.empty((n,), dtype=np.int32)
act_buf = as_int32_c_array(buf)
attack_base = self.attack_base
view2attack_buf = as_int32_c_array(self.view2attack)
_LIB.gather_infer_action(obs_buf, hp_buf, n, height, width, n_channel,
act_buf, attack_base, view2attack_buf)
return buf
| 1,105 | 33.5625 | 78 | py |
MAgent | MAgent-master/python/magent/builtin/rule_model/runaway.py | """deprecated"""
import numpy as np
from magent.model import BaseModel
from magent.c_lib import _LIB, as_int32_c_array, as_float_c_array
class RunawayPrey(BaseModel):
def __init__(self, env, handle, away_handle, *args, **kwargs):
BaseModel.__init__(self, env, handle)
self.away_channel = env.get_channel(away_handle)
self.attack_base, _ = env.get_view2attack(handle)
self.move_back = 4
print("attack base", self.attack_base, "away", self.away_channel)
def infer_action(self, observations, *args, **kwargs):
obs_buf = as_float_c_array(observations[0])
hp_buf = as_float_c_array(observations[1])
n, height, width, n_channel = observations[0].shape
buf = np.empty((n,), dtype=np.int32)
act_buf = as_int32_c_array(buf)
_LIB.runaway_infer_action(obs_buf, hp_buf, n, height, width, n_channel,
self.attack_base, act_buf, self.away_channel, self.move_back)
return buf
| 1,006 | 34.964286 | 95 | py |
MAgent | MAgent-master/python/magent/builtin/rule_model/random.py | """A random agent"""
import numpy as np
from magent.model import BaseModel
class RandomActor(BaseModel):
def __init__(self, env, handle, *args, **kwargs):
BaseModel.__init__(self, env, handle)
self.env = env
self.handle = handle
self.n_action = env.get_action_space(handle)[0]
def infer_action(self, obs, *args, **kwargs):
num = len(obs[0])
actions = np.random.randint(self.n_action, size=num, dtype=np.int32)
return actions
| 495 | 23.8 | 76 | py |
MAgent | MAgent-master/python/magent/builtin/rule_model/__init__.py | from .random import RandomActor
from .rush import RushPredator
from .runaway import RunawayPrey
from .rushgather import RushGatherer
| 133 | 25.8 | 36 | py |
MAgent | MAgent-master/python/magent/renderer/pygame_renderer.py | from __future__ import absolute_import
from __future__ import division
import math
import pygame
import numpy as np
from magent.renderer.base_renderer import BaseRenderer
from magent.renderer.server import BaseServer
class PyGameRenderer(BaseRenderer):
def __init__(self):
super(PyGameRenderer, self).__init__()
def start(
self,
server,
animation_total=2,
animation_stop=0,
resolution=None,
fps_soft_bound=60,
background_rgb=(255, 255, 255),
attack_line_rgb=(0, 0, 0),
attack_dot_rgb=(0, 0, 0),
attack_dot_size=0.3,
text_rgb=(0, 0, 0),
text_size=16,
text_spacing=3,
banner_size=32,
banner_spacing=3,
bigscreen_size=72,
bigscreen_spacing=0,
grid_rgba=(pygame.Color(0, 0, 0), 30),
grid_size=7.5,
grid_min_size=2,
grid_max_size=100,
zoom_rate=1 / 30,
move_rate=4,
full_screen=False
):
def draw_line(surface, color, a, b):
pygame.draw.line(
surface, color,
(int(round(a[0])), int(round(a[1]))),
(int(round(b[0])), int(round(b[1])))
)
def draw_rect(surface, color, a, w, h):
pygame.draw.rect(surface, color, pygame.Rect(*map(int, (
round(a[0]), round(a[1]),
round(w + a[0] - round(a[0])),
round(h + a[1] - round(a[1]))))))
def draw_rect_matrix(matrix, color, a, w, h, resolution):
x, y, w, h = map(int, (round(a[0]), round(a[1]), round(w + a[0] - round(a[0])), round(h + a[1] - round(a[1]))))
matrix[max(x, 0):min(x + w, resolution[0]), max(y, 0):min(h + y, resolution[1]), :] = color
def draw_line_matrix(matrix, color, a, b, resolution):
a = (min(max(0, a[0]), resolution[0] - 1), min(max(0, a[1]), resolution[1] - 1))
b = (min(max(0, b[0]), resolution[0] - 1), min(max(0, b[1]), resolution[1] - 1))
            # wrap in list() so the coordinates remain indexable under Python 3
            a = list(map(int, (round(a[0]), round(a[1]))))
            b = list(map(int, (round(b[0]), round(b[1]))))
if a[0] == b[0]:
if a[1] > b[1]:
matrix[a[0], b[1]:a[1] + 1] = color
else:
matrix[a[0], a[1]:b[1] + 1] = color
elif a[1] == b[1]:
if a[0] > b[0]:
matrix[b[0]:a[0] + 1, a[1]] = color
else:
matrix[a[0]:b[0] + 1, a[1]] = color
else:
raise NotImplementedError
if not isinstance(server, BaseServer):
raise BaseException('property server must be an instance of BaseServer')
pygame.init()
pygame.display.init()
if resolution is None:
info = pygame.display.Info()
resolution = info.current_w, info.current_h
clock = pygame.time.Clock()
if full_screen:
canvas = pygame.display.set_mode(resolution, pygame.DOUBLEBUF | pygame.FULLSCREEN, 0)
else:
canvas = pygame.display.set_mode(resolution, pygame.DOUBLEBUF, 0)
pygame.display.set_caption('MAgent Renderer Window')
text_formatter = pygame.font.SysFont(None, text_size, True)
banner_formatter = pygame.font.SysFont(None, banner_size, True)
bigscreen_formatter = pygame.font.SysFont(None, bigscreen_size, True)
map_size, groups, static_info = server.get_info()
view_position = [map_size[0] / 2 * grid_size - resolution[0] / 2,
map_size[1] / 2 * grid_size - resolution[1] / 2]
frame_id = 0
walls = static_info['wall']
old_data = None
new_data = None
need_static_update = True
#show_grid = False
animation_progress = 0
grid_map = np.zeros((resolution[0], resolution[1], 3), dtype=np.int16)
while True:
done = False
status = server.get_status(frame_id)
triggered = False
            # calculate the relative mouse coordinates in the gridworld
mouse_x, mouse_y = pygame.mouse.get_pos()
mouse_x = int((mouse_x + view_position[0]) / grid_size)
mouse_y = int((mouse_y + view_position[1]) / grid_size)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
done = True
elif event.type == pygame.KEYDOWN:
#if event.key == pygame.K_g:
# show_grid = not show_grid
#else:
# triggered = server.keydown(frame_id, event.key, mouse_x, mouse_y)
triggered = server.keydown(frame_id, event.key, mouse_x, mouse_y)
elif event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 4 or event.button == 5:
center_before = (
(view_position[0] + resolution[0] / 2) / grid_size,
(view_position[1] + resolution[1] / 2) / grid_size
)
if event.button == 5:
grid_size = max(grid_size - grid_size * zoom_rate, grid_min_size)
need_static_update = True
else:
grid_size = min(grid_size + grid_size * zoom_rate, grid_max_size)
need_static_update = True
center_after = (
(view_position[0] + resolution[0] / 2) / grid_size,
(view_position[1] + resolution[1] / 2) / grid_size
)
view_position[0] += (center_before[0] - center_after[0]) * grid_size
view_position[1] += (center_before[1] - center_after[1]) * grid_size
else:
triggered = server.mousedown(frame_id, pygame.mouse.get_pressed(), mouse_x, mouse_y)
pressed = pygame.key.get_pressed()
if pressed[pygame.K_ESCAPE]:
pygame.quit()
done = True
if pressed[pygame.K_COMMA] or pressed[pygame.K_PERIOD]:
# center before means the center before zoom operation
# center after means the center after zoom operation
# we need to keep that the above two are consistent during zoom operation
# and hence we need to adjust view_position simultaneously
center_before = (
(view_position[0] + resolution[0] / 2) / grid_size,
(view_position[1] + resolution[1] / 2) / grid_size
)
if pressed[pygame.K_COMMA]:
grid_size = max(grid_size - grid_size * zoom_rate, grid_min_size)
need_static_update = True
else:
grid_size = min(grid_size + grid_size * zoom_rate, grid_max_size)
need_static_update = True
center_after = (
(view_position[0] + resolution[0] / 2) / grid_size,
(view_position[1] + resolution[1] / 2) / grid_size
)
view_position[0] += (center_before[0] - center_after[0]) * grid_size
view_position[1] += (center_before[1] - center_after[1]) * grid_size
if pressed[pygame.K_LEFT]:
view_position[0] -= move_rate * grid_size
need_static_update = True
if pressed[pygame.K_RIGHT]:
view_position[0] += move_rate * grid_size
need_static_update = True
if pressed[pygame.K_UP]:
view_position[1] -= move_rate * grid_size
need_static_update = True
if pressed[pygame.K_DOWN]:
view_position[1] += move_rate * grid_size
need_static_update = True
if done:
break
# x_range: which vertical gridlines should be shown on the display
# y_range: which horizontal gridlines should be shown on the display
x_range = (
max(0, int(math.floor(max(0, view_position[0]) / grid_size))),
min(map_size[0], int(math.ceil(max(0, view_position[0] + resolution[0]) / grid_size)))
)
y_range = (
max(0, int(math.floor(max(0, view_position[1]) / grid_size))),
min(map_size[1], int(math.ceil(max(0, view_position[1] + resolution[1]) / grid_size)))
)
canvas.fill(background_rgb)
#if show_grid:
# if need_static_update or True:
# grids = pygame.Surface(resolution)
# grids.set_alpha(grid_rgba[1])
# grids.fill(background_rgb)
#
# for i in range(x_range[0], x_range[1] + 1):
# draw_line(
# canvas, grid_rgba[0],
# (i * grid_size - view_position[0], max(0, view_position[1]) - view_position[1]),
# (
# i * grid_size - view_position[0],
# min(view_position[1] + resolution[1], map_size[1] * grid_size) - view_position[1]
# )
# )
# for i in range(y_range[0], y_range[1] + 1):
# draw_line(
# canvas, grid_rgba[0],
# (max(0, view_position[0]) - view_position[0], i * grid_size - view_position[1]),
# (
# min(view_position[0] + resolution[0], map_size[0] * grid_size) - view_position[0],
# i * grid_size - view_position[1]
# )
# )
if new_data is None or animation_progress > animation_total + animation_stop:
buffered_new_data = server.get_data(
frame_id,
(view_position[0] / grid_size, (view_position[0] + resolution[0]) / grid_size),
(view_position[1] / grid_size, (view_position[1] + resolution[1]) / grid_size)
)
if buffered_new_data is None:
buffered_new_data = new_data
old_data = new_data
new_data = buffered_new_data
frame_id += 1
animation_progress = 0
if new_data is not None:
if old_data is None and animation_progress == 0:
animation_progress = animation_total
if need_static_update:
pygame.pixelcopy.surface_to_array(grid_map, canvas)
for wall in walls:
x, y = wall[0], wall[1]
if x >= x_range[0] and x <= x_range[1] and y >= y_range[0] and y <= y_range[1]:
draw_rect_matrix(grid_map, (127, 127, 127),
(x * grid_size - view_position[0], y * grid_size - view_position[1]),
grid_size, grid_size, resolution)
pygame.pixelcopy.array_to_surface(canvas, grid_map)
rate = min(1.0, animation_progress / animation_total)
for key in new_data[0]:
new_prop = new_data[0][key]
old_prop = old_data[0][key] if old_data is not None and key in old_data[0] else None
new_group = groups[new_prop[2]]
old_group = groups[old_prop[2]] if old_prop is not None else None
now_prop = [a * (1 - rate) + b * rate for a, b in
zip(old_prop, new_prop)] if old_prop is not None else new_prop
now_group = [a * (1 - rate) + b * rate for a, b in
zip(old_group, new_group)] if old_group is not None else new_group
draw_rect(
canvas, (int(now_group[2]), int(now_group[3]), int(now_group[4])),
(
now_prop[0] * grid_size - view_position[0],
now_prop[1] * grid_size - view_position[1]
),
now_group[0] * grid_size,
now_group[1] * grid_size
)
for key, event_x, event_y in new_data[1]:
if not key in new_data[0]:
continue
new_prop = new_data[0][key]
old_prop = old_data[0][key] if old_data is not None and key in old_data[0] else None
new_group = groups[new_prop[2]]
old_group = groups[old_prop[2]] if old_prop is not None else None
now_prop = [a * (1 - rate) + b * rate for a, b in
zip(old_prop, new_prop)] if old_prop is not None else new_prop
now_group = [a * (1 - rate) + b * rate for a, b in
zip(old_group, new_group)] if old_group is not None else new_group
draw_line(
canvas, attack_line_rgb,
(
now_prop[0] * grid_size - view_position[0] + now_group[0] / 2 * grid_size,
now_prop[1] * grid_size - view_position[1] + now_group[1] / 2 * grid_size
),
(
event_x * grid_size - view_position[0] + grid_size / 2,
event_y * grid_size - view_position[1] + grid_size / 2
)
)
draw_rect(
canvas, attack_dot_rgb,
(
event_x * grid_size - view_position[0] + grid_size / 2 - attack_dot_size * grid_size / 2,
event_y * grid_size - view_position[1] + grid_size / 2 - attack_dot_size * grid_size / 2,
),
attack_dot_size * grid_size,
attack_dot_size * grid_size
)
if status or triggered or animation_progress < animation_total + animation_stop:
animation_progress += 1
text_fps = text_formatter.render('FPS: {}'.format(int(clock.get_fps())), True, text_rgb)
text_window = text_formatter.render(
'Window: (%.1f, %.1f, %.1f, %.1f)' % (
view_position[0], view_position[1],
view_position[0] + resolution[0],
view_position[1] + resolution[1]
), True, text_rgb
)
text_grids = text_formatter.render('Numbers: %d' % len(new_data[0]), True, text_rgb)
text_mouse = text_formatter.render('Mouse: (%d, %d)' % (mouse_x, mouse_y), True, text_rgb)
canvas.blit(text_fps, (0, 0))
canvas.blit(text_window, (0, (text_size + text_spacing) / 1.5))
canvas.blit(text_grids, (0, (text_size + text_spacing) / 1.5 * 2))
canvas.blit(text_mouse, (0, (text_size + text_spacing) / 1.5 * 3))
height_now = 0
for texts in server.get_banners(frame_id, resolution):
content = []
width, height = 0, 0
for text in texts:
text = banner_formatter.render(text[0], True, pygame.Color(*text[1]))
content.append((text, width))
width += text.get_width()
height = max(height, text.get_height())
start = (resolution[0] - width) / 2.0
for b in content:
canvas.blit(b[0], (start + b[1], height_now))
height_now += height + banner_spacing
endscreen_texts = server.get_endscreen(frame_id)
if endscreen_texts:
total_height = 0
endscreen_contents = []
endscreen = pygame.Surface(resolution)
endscreen.set_alpha(230)
endscreen.fill(background_rgb)
for texts in endscreen_texts:
content = []
height = 0
for text in texts:
text = bigscreen_formatter.render(text[0], True, pygame.Color(*text[1]))
content.append([text])
height = max(height, text.get_height())
total_height += height + bigscreen_spacing
endscreen_contents.append(content)
total_height -= bigscreen_spacing
for content in endscreen_contents:
height, total_width = 0, 0
for b in content:
b.append(total_width)
total_width += b[0].get_width()
height = max(height, b[0].get_height())
width_now = (resolution[0] - total_width) / 2.0
for b in content:
endscreen.blit(b[0], (width_now + b[1], resolution[1] / 2.0 - total_height + height))
height_now += height + bigscreen_spacing
canvas.blit(endscreen, (0, 0))
if need_static_update:
need_static_update = False
pygame.display.update()
clock.tick(fps_soft_bound)
| 18,209 | 46.298701 | 123 | py |
MAgent | MAgent-master/python/magent/renderer/base_renderer.py | from abc import ABCMeta, abstractmethod
class BaseRenderer:
__metaclass__ = ABCMeta
def __init__(self):
pass
@abstractmethod
def start(self, *args, **kwargs):
pass
| 200 | 14.461538 | 39 | py |
MAgent | MAgent-master/python/magent/renderer/__init__.py | from .base_renderer import BaseRenderer
from .pygame_renderer import PyGameRenderer
| 84 | 27.333333 | 43 | py |
MAgent | MAgent-master/python/magent/renderer/server/base_server.py | from abc import ABCMeta, abstractmethod
class BaseServer:
__metaclass__ = ABCMeta
@abstractmethod
def get_info(self):
pass
@abstractmethod
def get_data(self, frame_id, x_range, y_range):
pass
@abstractmethod
def add_agents(self, x, y, g):
pass
@abstractmethod
def get_map_size(self):
pass
@abstractmethod
def get_banners(self, frame_id, resolution):
pass
@abstractmethod
def get_status(self, frame_id):
pass
@abstractmethod
def keydown(self, frame_id, key, mouse_x, mouse_y):
pass
@abstractmethod
def mousedown(self, frame_id, key, mouse_x, mouse_y):
pass
@abstractmethod
def get_endscreen(self, frame_id):
pass | 778 | 18 | 57 | py |
MAgent | MAgent-master/python/magent/renderer/server/sample_server.py | from .base_server import BaseServer
class SampleServer(BaseServer):
def get_group_info(self):
return [[1, 1, 0, 0, 0]]
def get_static_info(self):
return {"walls": []}
def get_data(self, frame_id, x_range, y_range):
if frame_id == 0:
return {1: [10, 10, 0]}, [(1, 0, 0)]
elif frame_id == 1:
return {1: [9, 10, 0]}, [(1, 0, 0)]
elif frame_id == 2:
return {1: [8, 10, 0]}, [(1, 0, 0)]
elif frame_id == 3:
return {1: [14, 12, 0]}, [(1, 0, 0)]
else:
return {1: [10, 10, 0]}, [(1, 0, 0)]
def add_agents(self, x, y, g):
pass
def get_map_size(self):
return [50, 50]
| 716 | 24.607143 | 51 | py |
MAgent | MAgent-master/python/magent/renderer/server/random_server.py | import random
from .base_server import BaseServer
class RandomServer(BaseServer):
def __init__(self, agent_number=1000, group_number=20, map_size=100, shape_range=3, speed=5, event_range=100):
self._data = {}
self._map_size = map_size
self._number = agent_number
for i in range(agent_number):
self._data.setdefault(i, [
random.randint(0, map_size - 1),
random.randint(0, map_size - 1),
random.randint(0, group_number - 1)
])
self._group = []
for i in range(group_number):
self._group.append([
random.randint(1, shape_range),
random.randint(1, shape_range),
random.randint(0, 255),
random.randint(0, 255),
random.randint(0, 255)
])
self._speed = speed
self._event_range = event_range
self._map_size = map_size
def get_group_info(self):
return self._group
def get_static_info(self):
return {"wall": []}
def get_data(self, frame_id, x_range, y_range):
result = {}
event = []
for i in self._data:
olddata = self._data[i]
data = [0, 0, 0]
data[0] = olddata[0] + random.randint(-self._speed, self._speed)
data[1] = olddata[1] + random.randint(-self._speed, self._speed)
data[0] = min(max(data[0], 0), self._map_size - 1)
data[1] = min(max(data[1], 0), self._map_size - 1)
data[2] = olddata[2]
self._data[i] = data
if (x_range[0] <= data[0] <= x_range[1] and y_range[0] <= data[1] <= y_range[1]) or \
(x_range[0] <= olddata[0] <= x_range[1] and y_range[0] <= olddata[1] <= y_range[1]):
result.setdefault(i, olddata)
event_number = random.randint(0, self._event_range)
for i in range(event_number):
            agent_id, _ = random.choice(list(self._data.items()))  # list() for Python 3 dict views
event.append(
(
agent_id,
random.randint(0, self._map_size - 1),
random.randint(0, self._map_size - 1)
)
)
return result, event
def add_agents(self, x, y, g):
self._data.setdefault(self._number, (x, y, g))
self._number += 1
def get_map_size(self):
return self._map_size, self._map_size
| 2,474 | 34.357143 | 114 | py |
MAgent | MAgent-master/python/magent/renderer/server/arrange_server.py | import time
import numpy as np
import random
import magent
from magent.builtin.tf_model import DeepQNetwork
from magent.renderer.server import BaseServer
from magent.utility import FontProvider
def remove_wall(d, cur_pos, wall_set, unit):
if d == 0:
for i in range(0, unit):
for j in range(0, unit):
temp = (cur_pos[0] + i, cur_pos[1] + unit + j)
if temp in wall_set:
wall_set.remove(temp)
elif d == 1:
for i in range(0, unit):
for j in range(0, unit):
temp = (cur_pos[0] - unit + i, cur_pos[1] + j)
if temp in wall_set:
wall_set.remove(temp)
elif d == 2:
for i in range(0, unit):
for j in range(0, unit):
temp = (cur_pos[0] + i, cur_pos[1] - unit + j)
if temp in wall_set:
wall_set.remove(temp)
elif d == 3:
for i in range(0, unit):
for j in range(0, unit):
temp = (cur_pos[0] + unit + i, cur_pos[1] + j)
if temp in wall_set:
wall_set.remove(temp)
def dfs(x, y, width, height, unit, wall_set):
pos = set()
trace = list()
pos.add((x, y))
trace.append((x, y))
max_x = x + width
max_y = y + height
d = random.choice(range(4))
pos_list = []
flag = 0
while len(trace) > 0:
if flag == 4:
cur_pos = trace[-1]
trace.pop()
if random.choice(range(2)) == 0:
remove_wall(d, cur_pos, wall_set, unit)
flag = 0
if len(trace) == 0:
break
cur_pos = list(trace[-1])
if d == 0:
cur_pos[1] = max(y, cur_pos[1] - 2 * unit)
elif d == 1:
cur_pos[0] = min(max_x, cur_pos[0] + 2 * unit)
elif d == 2:
cur_pos[1] = min(max_y, cur_pos[1] + 2 * unit)
elif d == 3:
cur_pos[0] = max(x, cur_pos[0] - 2 * unit)
if tuple(cur_pos) in pos:
d = (d + 1) % 4
flag += 1
else:
remove_wall(d, cur_pos, wall_set, unit)
trace.append(tuple(cur_pos))
pos.add(tuple(cur_pos))
d = random.choice(range(4))
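# The helper above carves corridors with a randomized depth-first search: from (x, y) it
# repeatedly jumps two `unit`-wide cells in a random direction, knocking out the wall cells
# in between (remove_wall) whenever it reaches an unvisited cell, and backtracks (sometimes
# removing one extra wall) once all four directions around the current cell are exhausted.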
def clean_pos_set_convert_to_list(pos_set, pos_list):
for v in pos_list:
if v in pos_set:
pos_set.remove(v)
return list(pos_set)
def draw_line(x, y, width, height):
pos_set = []
for r in range(height):
for c in range(width):
pos_set.append((x + c, y + r))
return pos_set
def open_the_door(x_s, y_s, w, h, unit):
pos_list = []
n_door = 15
random_horizon_list_x = [x_s + (2 * np.random.choice(w // 2 // unit, n_door) + 1) * unit, x_s + (2 * np.random.choice(w // 2 // unit, n_door) - 1) * unit]
random_vertical_list_y = [y_s + (2 * np.random.choice(h // 2 // unit, n_door) + 1) * unit, y_s + (2 * np.random.choice(h // 2 // unit, n_door) + 1) * unit]
y_e = y_s + h - unit
for v in random_horizon_list_x[0]:
pos_list.extend([(v, y_s), (v + 1, y_s), (v, y_s + 1), (v + 1, y_s + 1)])
for v in random_horizon_list_x[1]:
pos_list.extend([(v, y_e), (v + 1, y_e), (v, y_e + 1), (v + 1, y_e + 1)])
x_e = x_s + w - unit
for v in random_vertical_list_y[0]:
pos_list.extend([(x_s, v), (x_s, v + 1), (x_s + 1, v), (x_s + 1, v + 1)])
for v in random_vertical_list_y[1]:
pos_list.extend([(x_e, v), (x_e, v + 1), (x_e + 1, v), (x_e + 1, v + 1)])
return pos_list
def create_maze(pos, width, height, unit, font_area):
# draw block: with rect: left(x), top(y), width, height
pos_set = []
for i in range(height):
if i % 2 == 0:
pos_set.extend(draw_line(pos[0], pos[1] + i * unit, width * unit, unit))
pos_set.extend(draw_line(pos[0], pos[1] + font_area[1] + i * unit, width * unit, unit))
pos_set.extend(draw_line(pos[0] + i * unit, pos[1] + height * unit, unit, font_area[1]))
pos_set.extend(draw_line(pos[0] + font_area[0] + i * unit, pos[1] + height * unit, unit, font_area[1]))
for i in range(width):
if i % 2 == 0:
pos_set.extend(draw_line(pos[0] + i * unit, pos[1], unit, height * unit))
pos_set.extend(draw_line(pos[0] + i * unit, pos[1] + font_area[1], unit, height * unit))
pos_set.extend(draw_line(pos[0], pos[1] + i * unit, height * unit, unit))
pos_set.extend(draw_line(pos[0] + font_area[0], pos[1] + i * unit, height * unit, unit))
pos_set = set(pos_set)
dfs(pos[0] + 2, pos[1] + 2, (width - 1) * unit, (height - 1) * unit, unit, pos_set) # north
dfs(pos[0] + 2, pos[1] + (height - 2) * unit, (height - 1) * unit, (width + 3) * unit, unit, pos_set) # west
dfs(pos[0] + height * unit, pos[1] + font_area[1] - unit, (width - height) * unit, (height - 1) * unit, unit, pos_set) # south
dfs(pos[0] + font_area[0] - unit, pos[1] + (height - 2) * unit, (height - 1) * unit, font_area[1] - (height + 1) * unit, unit, pos_set) # east
temp = []
temp.extend(open_the_door(pos[0], pos[1], font_area[0] + height * unit, font_area[1] + height * unit, unit))
res = clean_pos_set_convert_to_list(pos_set, temp)
return res
def load_config(map_size):
gw = magent.gridworld
cfg = gw.Config()
cfg.set({"map_width": map_size, "map_height": map_size})
cfg.set({"minimap_mode": True})
cfg.set({"embedding_size": 12})
goal = cfg.register_agent_type(
"goal",
{'width': 1, 'length': 1,
'can_absorb': True
}
)
agent = cfg.register_agent_type(
"agent",
{'width': 1, 'length': 1, 'hp': 10, 'speed': 2,
'view_range': gw.CircleRange(6),
'damage': 2, 'step_recover': -10.0/400,
'step_reward': 0,
})
g_goal = cfg.add_group(goal)
g_agent = cfg.add_group(agent)
g = gw.AgentSymbol(g_goal, 'any')
a = gw.AgentSymbol(g_agent, 'any')
cfg.add_reward_rule(gw.Event(a, 'collide', g), receiver=a, value=10)
return cfg
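# In the config above, AgentSymbol selects "any member" of a group and add_reward_rule
# attaches a reward to an Event: here an agent that collides with (i.e. reaches) a goal
# receives +10. receiver/value may also be lists to reward several symbols at once, as the
# predator-prey config in scripts/test/test_1m.py does.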
def generate_map(mode, env, map_size, goal_handle, handles, messages, font):
# pre-process message
max_len = 8
new = []
for msg in messages:
if len(msg) > max_len:
for i in range(0, len(msg), max_len):
new.append(msg[i:i+max_len])
else:
new.append(msg)
messages = new
center_x, center_y = map_size // 2, map_size // 2
# create maze
if mode == 1:
radius = 90
pos_list = create_maze([center_x - radius, center_y - radius], radius + 1, 15, 2, font_area=[radius * 2 - 28, radius * 2 - 28])
env.add_walls(method="custom", pos=pos_list)
def add_square(pos, side, gap):
side = int(side)
for x in range(center_x - side//2, center_x + side//2 + 1, gap):
pos.append([x, center_y - side//2])
pos.append([x, center_y + side//2])
for y in range(center_y - side//2, center_y + side//2 + 1, gap):
pos.append([center_x - side//2, y])
pos.append([center_x + side//2, y])
def draw(base_x, base_y, scale, data):
w, h = len(data), len(data[0])
pos = []
for i in range(w):
for j in range(h):
if data[i][j] == 1:
start_x = i * scale + base_y
start_y = j * scale + base_x
for x in range(start_x, start_x + scale):
for y in range(start_y, start_y + scale):
pos.append([y, x])
env.add_agents(goal_handle, method="custom", pos=pos)
base_y = (map_size - len(messages) * font.height) // 2
for message in messages:
base_x = (map_size - len(message) * font.width) // 2
scale = 1
for x in message:
data = font.get(x)
draw(base_x, base_y, scale, data)
base_x += font.width
base_y += font.height + 1
alpha_goal_num = env.get_num(goal_handle)
# agent
pos = []
add_square(pos, map_size * 0.95, 1)
add_square(pos, map_size * 0.90, 1)
add_square(pos, map_size * 0.85, 1)
add_square(pos, map_size * 0.80, 1)
pos = np.array(pos)
pos = pos[np.random.choice(np.arange(len(pos)), int(alpha_goal_num * 1.6), replace=False)]
env.add_agents(handles[0], method="custom", pos=pos)
class ArrangeServer(BaseServer):
def get_banners(self, frame_id, resolution):
return []
def keydown(self, frame_id, key, mouse_x, mouse_y):
return False
def get_status(self, frame_id):
if self.done:
return None
else:
return True
def get_endscreen(self, frame_id):
return []
def mousedown(self, frame_id, key, mouse_x, mouse_y):
return False
def get_info(self):
ret = self.env._get_groups_info()
ret[1] = ret[0]
return (self.map_size, self.map_size), ret, {'wall': self.env._get_walls_info()}
def __init__(self, path="data/arrange_model", messages=None, mode=1):
# some parameter
map_size = 250
eps = 0.15
# init the game
env = magent.GridWorld(load_config(map_size))
font = FontProvider('data/font_8x8/basic.txt')
handles = env.get_handles()
food_handle, handles = handles[0], handles[1:]
models = []
models.append(DeepQNetwork(env, handles[0], 'arrange', use_conv=True))
# load model
models[0].load(path, 10)
# init environment
env.reset()
generate_map(mode, env, map_size, food_handle, handles, messages, font)
# save to member variable
self.env = env
self.food_handle = food_handle
self.handles = handles
self.eps = eps
self.models = models
self.done = False
self.map_size = map_size
self.new_rule_ct = 0
self.pos_reward_ct = set()
self.num = None
self.ct = 0
def step(self):
handles = self.handles
models = self.models
env = self.env
center_x = self.map_size // 2
center_y = self.map_size
for j in range(2):
obs = [env.get_observation(handle) for handle in handles]
ids = [env.get_agent_id(handle) for handle in handles]
for i in range(len(handles)):
if self.new_rule_ct > 0:
obs[i][1][:, 10:12] = 0
else:
obs[i][1][:, 10:12] = 1
acts = models[i].infer_action(obs[i], ids[i], 'e_greedy', eps=self.eps)
env.set_action(handles[i], acts)
done = env.step()
goal_num = env.get_num(self.food_handle)
rewards = env.get_reward(handles[0])
for id_, r in zip(ids[0], rewards):
if r > 0.05 and id_ not in self.pos_reward_ct:
self.pos_reward_ct.add(id_)
if 1.0 * len(self.pos_reward_ct) / goal_num >= 0.99:
self.new_rule_ct += 1
self.num = [env.get_num(handle) for handle in [self.food_handle] + handles]
env.clear_dead()
if done:
break
return done
def get_data(self, frame_id, x_range, y_range):
start = time.time()
if not self.done:
self.done = self.step()
print(self.done)
if self.done:
print("done!")
pos, event = self.env._get_render_info(x_range, y_range)
print(" fps ", 1 / (time.time() - start))
return pos, event
def add_agents(self, x, y, g):
pos = []
for i in range(-3, 3):
for j in range(-3, 3):
pos.append((x + i, y + j))
self.env.add_agents(self.handles[g], method="custom", pos=pos)
def get_map_size(self):
return self.map_size, self.map_size
def get_numbers(self):
return self.num
| 12,054 | 31.319035 | 159 | py |
MAgent | MAgent-master/python/magent/renderer/server/battle_server.py | import math
import time
import matplotlib.pyplot as plt
import numpy as np
import magent
from magent.builtin.tf_model import DeepQNetwork
from magent.renderer.server import BaseServer
def load_config(map_size):
gw = magent.gridworld
cfg = gw.Config()
cfg.set({"map_width": map_size, "map_height": map_size})
cfg.set({"minimap_mode": True})
cfg.set({"embedding_size": 10})
small = cfg.register_agent_type(
"small",
{'width': 1, 'length': 1, 'hp': 10, 'speed': 2,
'view_range': gw.CircleRange(6), 'attack_range': gw.CircleRange(1.5),
'damage': 2, 'step_recover': 0.1,
'step_reward': -0.001, 'kill_reward': 100, 'dead_penalty': -0.05, 'attack_penalty': -1,
})
g0 = cfg.add_group(small)
g1 = cfg.add_group(small)
a = gw.AgentSymbol(g0, index='any')
b = gw.AgentSymbol(g1, index='any')
cfg.add_reward_rule(gw.Event(a, 'attack', b), receiver=a, value=2)
cfg.add_reward_rule(gw.Event(b, 'attack', a), receiver=b, value=2)
return cfg
def generate_map(env, map_size, handles):
width = map_size
height = map_size
init_num = 20
gap = 3
leftID, rightID = 0, 1
# left
pos = []
for y in range(10, 45):
pos.append((width / 2 - 5, y))
pos.append((width / 2 - 4, y))
for y in range(50, height // 2 + 25):
pos.append((width / 2 - 5, y))
pos.append((width / 2 - 4, y))
for y in range(height // 2 - 25, height - 50):
pos.append((width / 2 + 5, y))
pos.append((width / 2 + 4, y))
for y in range(height - 45, height - 10):
pos.append((width / 2 + 5, y))
pos.append((width / 2 + 4, y))
env.add_walls(pos=pos, method="custom")
n = init_num
side = int(math.sqrt(n)) * 2
pos = []
for x in range(width // 2 - gap - side, width // 2 - gap - side + side, 2):
for y in range((height - side) // 2, (height - side) // 2 + side, 2):
pos.append([x, y, 0])
env.add_agents(handles[leftID], method="custom", pos=pos)
# right
n = init_num
side = int(math.sqrt(n)) * 2
pos = []
for x in range(width // 2 + gap, width // 2 + gap + side, 2):
for y in range((height - side) // 2, (height - side) // 2 + side, 2):
pos.append([x, y, 0])
env.add_agents(handles[rightID], method="custom", pos=pos)
class BattleServer(BaseServer):
def __init__(self, path="data/battle_model", total_step=1000, add_counter=10, add_interval=50):
# some parameter
map_size = 125
eps = 0.05
# init the game
env = magent.GridWorld(load_config(map_size))
handles = env.get_handles()
models = []
models.append(DeepQNetwork(env, handles[0], 'trusty-battle-game-l', use_conv=True))
models.append(DeepQNetwork(env, handles[1], 'trusty-battle-game-r', use_conv=True))
# load model
models[0].load(path, 0, 'trusty-battle-game-l')
models[1].load(path, 0, 'trusty-battle-game-r')
# init environment
env.reset()
generate_map(env, map_size, handles)
# save to member variable
self.env = env
self.handles = handles
self.eps = eps
self.models = models
self.map_size = map_size
self.total_step = total_step
self.add_interval = add_interval
self.add_counter = add_counter
self.done = False
print(env.get_view2attack(handles[0]))
plt.show()
def get_info(self):
return (self.map_size, self.map_size), self.env._get_groups_info(), {'wall': self.env._get_walls_info()}
def step(self):
handles = self.handles
models = self.models
env = self.env
obs = [env.get_observation(handle) for handle in handles]
ids = [env.get_agent_id(handle) for handle in handles]
counter = []
for i in range(len(handles)):
acts = models[i].infer_action(obs[i], ids[i], 'e_greedy', eps=self.eps)
env.set_action(handles[i], acts)
counter.append(np.zeros(shape=env.get_action_space(handles[i])))
for j in acts:
counter[-1][j] += 1
# plt.clf()
# for c in counter:
# plt.bar(range(len(c)), c / np.sum(c))
# plt.draw()
# plt.pause(1e-8)
# code for checking the correctness of observation
# for channel in range(7):
# x = magent.round(list(obs[1][0][0][:,:,channel]), 2)
# for row in x:
# print row
# print("-------------")
# input()
done = env.step()
env.clear_dead()
return done
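    # One rendered frame corresponds to one environment step: fetch observations and agent
    # ids per handle, infer eps-greedy actions with the two loaded DQN models, apply them
    # with set_action, advance the simulation with env.step(), and clear_dead() before the
    # next frame is drawn.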
def get_data(self, frame_id, x_range, y_range):
start = time.time()
if self.done:
return None
self.done = self.step()
pos, event = self.env._get_render_info(x_range, y_range)
print(" fps ", 1 / (time.time() - start))
return pos, event
def add_agents(self, x, y, g):
pos = []
for i in range(-5, 5):
for j in range(-5, 5):
pos.append((x + i, y + j))
self.env.add_agents(self.handles[g], method="custom", pos=pos)
pos = []
x = np.random.randint(0, self.map_size - 1)
y = np.random.randint(0, self.map_size - 1)
for i in range(-5, 5):
for j in range(-5, 6):
pos.append((x + i, y + j))
self.env.add_agents(self.handles[g ^ 1], method="custom", pos=pos)
def get_map_size(self):
return self.map_size, self.map_size
def get_banners(self, frame_id, resolution):
red = '{}'.format(self.env.get_num(self.handles[0])), (200, 0, 0)
vs = ' vs ', (0, 0, 0)
blue = '{}'.format(self.env.get_num(self.handles[1])), (0, 0, 200)
result = [(red, vs, blue)]
tmp = '{} chance(s) remained'.format(
max(0, self.add_counter)), (0, 0, 0)
result.append((tmp,))
tmp = '{} / {} steps'.format(frame_id, self.total_step), (0, 0, 0)
result.append((tmp,))
if frame_id % self.add_interval == 0 and frame_id < self.total_step and self.add_counter > 0:
tmp = 'Please press your left mouse button to add agents', (0, 0, 0)
result.append((tmp,))
return result
def get_status(self, frame_id):
if frame_id % self.add_interval == 0 and self.add_counter > 0:
return False
elif frame_id >= self.total_step or self.done:
return None
else:
return True
def keydown(self, frame_id, key, mouse_x, mouse_y):
return False
def mousedown(self, frame_id, pressed, mouse_x, mouse_y):
if frame_id % self.add_interval == 0 and frame_id < self.total_step and pressed[0] \
and self.add_counter > 0 and not self.done:
self.add_counter -= 1
pos = []
for i in range(-5, 5):
for j in range(-5, 5):
pos.append((mouse_x + i, mouse_y + j))
self.env.add_agents(self.handles[0], method="custom", pos=pos)
pos = []
x = np.random.randint(0, self.map_size - 1)
y = np.random.randint(0, self.map_size - 1)
for i in range(-5, 6):
for j in range(-5, 5):
pos.append((x + i, y + j))
self.env.add_agents(self.handles[1], method="custom", pos=pos)
return True
return False
def get_endscreen(self, frame_id):
if frame_id == self.total_step or self.done:
if self.env.get_num(self.handles[0]) > self.env.get_num(self.handles[1]):
return [(("You", (200, 0, 0)), (" win! :)", (0, 0, 0)))]
else:
return [(("You", (200, 0, 0)), (" lose. :(", (0, 0, 0)))]
else:
return []
| 7,905 | 31.941667 | 112 | py |
MAgent | MAgent-master/python/magent/renderer/server/__init__.py | from .base_server import BaseServer
from .sample_server import SampleServer
from .random_server import RandomServer
from .battle_server import BattleServer
from .arrange_server import ArrangeServer
| 198 | 32.166667 | 41 | py |
MAgent | MAgent-master/scripts/plot_many.py | """plot curve from many log files"""
import sys
import matplotlib.pyplot as plt
import numpy as np
rec_filename = sys.argv[1]
plot_key = sys.argv[2]
list_col_index = int(sys.argv[3]) if len(sys.argv) > 3 else -1
silent = sys.argv[-1] == '--silent'
def parse_pair(item):
"""parse pair \tkey: value\t """
split_index = item.find(":")
key = item[:split_index].strip()
value = item[split_index+1:].strip()
return key, value
def parse_log_file(filename, begin_item_index=0):
"""log_file format \tkey: value\t key:value\t key:value\t ... """
ret = {}
with open(filename, 'r') as fin:
for line in fin.readlines():
items = line.split('\t')
if len(items) < 1: # ignore error
continue
for item in items[begin_item_index:]:
key, value = parse_pair(item)
if key not in ret:
ret[key] = []
ret[key].append(value)
return ret
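# Illustrative input for the parser above (the keys are made up; the format is the
# tab-separated "key: value" layout it expects): a line like
#   "round: 3\t loss: 0.125\t num: [64, 61]"
# becomes {"round": ["3"], "loss": ["0.125"], "num": ["[64, 61]"]}; values stay strings and
# are eval()-ed further below when plotted.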
rec_dict = parse_log_file(rec_filename)
legend = []
data = []
for log_file_name in rec_dict["log_file"]: # parse every file
log_dict = parse_log_file(log_file_name)
now = log_dict[plot_key]
tmp = eval(now[0])
if isinstance(tmp, list): # is list, expand it
col_num = len(tmp)
for row in range(len(now)):
now[row] = eval(now[row])
now = np.array(now)
print(now)
if list_col_index == -1:
for col in range(col_num):
legend.append(log_file_name + "-" + str(col))
data.append(now[:,col])
else:
legend.append(log_file_name)
data.append(now[:,list_col_index])
else: # is a scalar
for i in range(len(now)):
now[i] = eval(now[i])
legend.append(log_file_name)
data.append(now)
data = np.array(data)
print(legend)
print(data)
plt.plot(data.T)
plt.legend(legend)
plt.savefig(rec_filename + ".png")
if not silent:
plt.show()
| 1,994 | 24.576923 | 71 | py |
MAgent | MAgent-master/scripts/plot_log.py | """plot general log file according to given indexes"""
import sys
import matplotlib.pyplot as plt
import numpy as np
filename = sys.argv[1]
data = []
with open(filename, 'r') as fin:
for line in fin.readlines():
items = line.split('\t')
row = []
for item in items[1:]:
t = eval(item.split(':')[1])
if isinstance(t, list):
for x in t:
row.append(x)
else:
row.append(t)
if len(row) > 0:
data.append(row)
data = np.array(data)
for index in sys.argv[2:]:
index = int(index)
plt.plot(data[:, index])
plt.show()
| 658 | 18.969697 | 54 | py |
MAgent | MAgent-master/scripts/plot_reward.py | """deprecated"""
import matplotlib.pyplot as plt
from matplotlib.colors import hsv_to_rgb
import numpy as np
import sys
filename = sys.argv[1]
data = []
with open(filename) as fin:
for i, row in enumerate(fin.readlines()):
row = eval(row)
data.append(row)
#if i > max_n:
# break
move_ave = None
alpha = 0.5
n = len(data)
print(n)
for i, row in enumerate(data):
row = np.array(row)
row = row + 2
row = np.where(row > 0, row, np.zeros_like(row))
if move_ave is None:
move_ave = row
else:
move_ave = alpha * move_ave + (1 - alpha) * row
lin = np.arange(len(row))
row = np.log(row + 1e-5)
lin = np.log(lin + 1)
plt.plot(lin, move_ave, color=hsv_to_rgb((0.33 - 0.33 * i / n,1,1)))
plt.show()
| 785 | 19.684211 | 72 | py |
MAgent | MAgent-master/scripts/plot_heat.py | """plot a heatmap for tournament"""
import matplotlib.pyplot as plt
import numpy as np
def plot_heatmap(x, y, z):
x, y = np.meshgrid(y, x)
fig, ax = plt.subplots()
im = ax.pcolormesh(x, y, z)
fig.colorbar(im)
def smooth(data, alpha, beta=None):
beta = beta or alpha
for i in range(0, len(data)):
for j in range(1, len(data[0])):
data[i][j] = alpha * data[i][j-1] + (1-alpha) * data[i][j]
for j in range(0, len(data[0])):
for i in range(1, len(data)):
data[i][j] = alpha * data[i-1][j] + (1-alpha) * data[i][j]
return data
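# smooth() runs an exponential moving average twice, first along each row and then along
# each column, so isolated win-rate cells blend with their neighbours before the heatmap is
# drawn; alpha closer to 1 means heavier smoothing (the beta argument is currently unused).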
filename = "detail.log"
data = []
round2index = {}
ct = 0
with open(filename) as fin:
for line in fin.readlines():
item = line.split("\t")
l = eval(item[0].split(":")[1])[2]
r = eval(item[1].split(":")[1])[2]
rate = eval(item[2].split(":")[1])
num = eval(item[3].split(":")[1])
for no in [l, r]:
if no not in round2index:
round2index[no] = ct
ct += 1
data.append([l, r, rate, num])
heat_data = [[0.5 for _ in range(ct)] for _ in range(ct)]
for line in data:
l = round2index[line[0]]
r = round2index[line[1]]
rate = line[2]
num = line[3]
heat_data[l][r] = rate
heat_data[r][l] = 1 - rate
heat_data = smooth(heat_data, 0.8)
heat_data = np.array(heat_data)
rounds = np.sort(np.array(list(round2index.keys())))  # list() so it works under Python 3
pick = 60
heat_data = heat_data[:pick,:pick]
rounds = rounds[:pick]
plot_heatmap(rounds, rounds, heat_data)
plt.show()
| 1,558 | 22.621212 | 70 | py |
MAgent | MAgent-master/scripts/plot.py | """dynamic plot class"""
import matplotlib.pyplot as plt
class DynamicPlot:
def __init__(self, n):
self.x_data = []
self.y_datas = []
self.lines = []
plt.show()
axes = plt.gca()
for i in range(n):
self.y_datas.append([])
line, = axes.plot(self.x_data, self.y_datas[-1])
self.lines.append(line)
self.axes = axes
def add_point(self, x, ys):
self.x_data.append(x)
for i in range(len(ys)):
self.y_datas[i].append(ys[i])
def redraw(self):
for i in range(len(self.lines)):
self.lines[i].set_xdata(self.x_data)
self.lines[i].set_ydata(self.y_datas[i])
self.axes.autoscale(True)
self.axes.relim()
x_left, x_right = self.axes.get_xlim()
y_left, y_right = self.axes.get_ylim()
self.axes.set_xlim(x_left, (int(x_right) / 100 + 1) * 100)
self.axes.set_ylim(0, y_right * 1.2)
plt.draw()
plt.pause(1e-15)
def save(self, filename):
plt.savefig(filename)
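    # Hypothetical usage sketch (curve names are illustrative):
    #   plot = DynamicPlot(2)                 # two curves on one axis
    #   for step in range(1000):
    #       plot.add_point(step, [loss, reward])
    #       if step % 50 == 0:
    #           plot.redraw()                 # live-updating matplotlib window
    #   plot.save("curves.png")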
| 1,093 | 23.863636 | 66 | py |
MAgent | MAgent-master/scripts/tournament.py | """let saved models to play tournament"""
import os
import numpy as np
import time
import re
import math
import magent
from magent.builtin.tf_model import DeepQNetwork
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def play(env, handles, models, map_size, leftID, rightID, eps=0.05):
env.reset()
# generate map
width = height = map_size
init_num = map_size * map_size * 0.04
gap = 3
# left
n = init_num
side = int(math.sqrt(n)) * 2
pos = []
for x in range(width//2 - gap - side, width//2 - gap - side + side, 2):
for y in range((height - side)//2, (height - side)//2 + side, 2):
pos.append([x, y, 0])
env.add_agents(handles[leftID], method="custom", pos=pos)
# right
n = init_num
side = int(math.sqrt(n)) * 2
pos = []
for x in range(width//2 + gap, width//2 + gap + side, 2):
for y in range((height - side)//2, (height - side)//2 + side, 2):
pos.append([x, y, 0])
env.add_agents(handles[rightID], method="custom", pos=pos)
step_ct = 0
done = False
n = 2
obs = [[] for _ in range(n)]
ids = [[] for _ in range(n)]
acts = [[] for _ in range(n)]
nums = [env.get_num(handle) for handle in handles]
while not done:
# take actions for every model
for i in range(n):
obs[i] = env.get_observation(handles[i])
ids[i] = env.get_agent_id(handles[i])
models[i].infer_action(obs[i], ids[i], 'e_greedy', eps, block=False)
for i in range(n):
acts[i] = models[i].fetch_action()
env.set_action(handles[i], acts[i])
done = env.step()
nums = [env.get_num(handle) for handle in handles]
env.clear_dead()
step_ct += 1
if step_ct > 550:
break
return nums
def extract_model_names(savedir, name, model_class, begin=0, pick_every=4):
if model_class is DeepQNetwork:
prefix = 'tfdqn'
pattern = re.compile(prefix + '_(\d*).meta')
ret = []
for path in os.listdir(os.path.join(savedir, name)):
match = pattern.match(path)
if match and int(match.group(1)) > begin:
ret.append((savedir, name, int(match.group(1)), model_class))
ret.sort(key=lambda x: x[2])
ret = [ret[i] for i in range(0, len(ret), pick_every)]
return ret
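# Example: with DeepQNetwork checkpoints stored as save_model/battle/tfdqn_<round>.meta,
# extract_model_names('save_model', 'battle', DeepQNetwork, begin=0, pick_every=5) returns
# every 5th (savedir, name, round, model_class) tuple, sorted by round number.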
if __name__ == '__main__':
map_size = 125
env = magent.GridWorld("battle", map_size=map_size)
env.set_render_dir("build/render")
# scan file names
model_name = extract_model_names('save_model', 'battle', DeepQNetwork, begin=0, pick_every=5)
print("total models = %d" % len(model_name))
print("models", [x[:-1] for x in model_name])
handles = env.get_handles()
def play_wrapper(model_names, n_rounds):
time_stamp = time.time()
models = []
for i, item in enumerate(model_names):
models.append(magent.ProcessingModel(env, handles[i], item[1], 0, item[-1]))
for i, item in enumerate(model_names):
models[i].load(item[0], item[2])
leftID, rightID = 0, 1
result = 0
total_num = np.zeros(2)
for _ in range(n_rounds):
round_num = play(env, handles, models, map_size, leftID, rightID)
total_num += round_num
leftID, rightID = rightID, leftID
result += 1 if round_num[0] > round_num[1] else 0
result = 1.0 * result
for model in models:
model.quit()
return result / n_rounds, total_num / n_rounds, time.time() - time_stamp
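    # Each pairing plays n_rounds games with the two checkpoints swapping spawn sides
    # (leftID/rightID) every game so neither benefits from a fixed starting position; the
    # returned rate is the fraction of games in which model_names[0] ends with more
    # surviving agents.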
detail_file = open("detail.log", "w")
winrate_file = open("win_rate.log", "w")
rate = [[0.0 for j in range(len(model_name))] for i in range(len(model_name))]
for i in range(len(model_name)):
for j in range(i+1, len(model_name)):
rate[i][j], nums, elapsed = play_wrapper([model_name[i], model_name[j]], 6)
rate[j][i] = 1.0 - rate[i][j]
round_res = ("model1: %s\t model2: %s\t rate: %.2f\t num: %s\t elapsed: %.2f" %
(model_name[i][:-1], model_name[j][:-1], rate[i][j], list(nums), elapsed))
print(round_res)
detail_file.write(round_res + "\n")
winrate_file.write("model: %s\twin rate: %.2f\n" % (model_name[i],
1.0 * sum(np.asarray(rate[i])) / (len(model_name) - 1)))
detail_file.flush()
winrate_file.flush()
| 4,484 | 30.363636 | 116 | py |
MAgent | MAgent-master/scripts/rename.py | """rename tensorflow models"""
import sys
import magent
from magent.builtin.tf_model import DeepQNetwork
env = magent.GridWorld("battle", map_size=125)
handles = env.get_handles()
rounds = eval(sys.argv[1])
for i in [rounds]:
model = DeepQNetwork(env, handles[0], "battle")
print("load %d" % i)
model.load("data/", i, "selfplay")
print("save %d" % i)
model.save("data/battle_model", i)
| 412 | 19.65 | 51 | py |
MAgent | MAgent-master/scripts/test/test_1m.py | """test one million random agents"""
import time
import magent
import os
import math
import argparse
from magent.builtin.rule_model import RandomActor
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def load_forest(map_size):
gw = magent.gridworld
cfg = gw.Config()
cfg.set({"map_width": map_size, "map_height": map_size})
predator = cfg.register_agent_type(
"predator",
{
'width': 2, 'length': 2, 'hp': 1, 'speed': 1,
'view_range': gw.CircleRange(5), 'attack_range': gw.CircleRange(2),
'attack_penalty': -0.2
})
prey = cfg.register_agent_type(
"prey",
{
'width': 1, 'length': 1, 'hp': 1, 'speed': 1.5,
'view_range': gw.CircleRange(4), 'attack_range': gw.CircleRange(0)
})
predator_group = cfg.add_group(predator)
prey_group = cfg.add_group(prey)
a = gw.AgentSymbol(predator_group, index='any')
b = gw.AgentSymbol(prey_group, index='any')
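    # Reward rule: whenever any predator attacks any prey, the predator receives +1 and the prey -1.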
cfg.add_reward_rule(gw.Event(a, 'attack', b), receiver=[a, b], value=[1, -1])
return cfg
def measure_time(msg, func, *args, **kwargs):
start_time = time.time()
ret = func(*args, **kwargs)
print("%-11s %.5f" % (msg, time.time() - start_time))
return ret
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--n_step", type=int, default=20)
parser.add_argument("--agent_number", type=int, default=1000)
parser.add_argument("--num_gpu", type=int, default=0)
parser.add_argument('--frame', default='tf', choices=['tf', 'mx'])
args = parser.parse_args()
n_step = args.n_step
agent_number = args.agent_number
skip = 20 # warm up steps
n_step += skip
# init the game "forest" (or "battle" here)
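    # The map side length grows with sqrt(agent_number) so the agent density stays roughly
    # constant (about one agent per 20 cells).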
env = magent.GridWorld(load_forest(int(math.sqrt(agent_number * 20))))
env.reset()
# add two groups of animals
deer_handle, tiger_handle = env.get_handles()
    env.add_walls(method="random", n=agent_number // 10)
    env.add_agents(deer_handle, method="random", n=agent_number // 2)
    env.add_agents(tiger_handle, method="random", n=agent_number // 2)
# init two models
if args.num_gpu == 0:
model1 = RandomActor(env, deer_handle, "deer")
model2 = RandomActor(env, tiger_handle, "tiger")
else:
if args.frame == 'tf':
from magent.builtin.tf_model import DeepQNetwork
else:
from magent.builtin.mx_model import DeepQNetwork
model1 = DeepQNetwork(env, deer_handle, "deer", num_gpu=args.num_gpu, infer_batch_size=100000)
model2 = DeepQNetwork(env, tiger_handle, "tiger", num_gpu=args.num_gpu, infer_batch_size=100000)
total_reward = 0
print(env.get_view_space(deer_handle))
print(env.get_view_space(tiger_handle))
total_time = 0
for i in range(n_step):
print("===== step %d =====" % i)
start_time = time.time()
obs_1 = measure_time("get obs 1", env.get_observation, deer_handle)
acts_1 = measure_time("infer act 1", model1.infer_action, obs_1, None)
measure_time("set act 1", env.set_action, deer_handle, acts_1)
obs_2 = measure_time("get obs 2", env.get_observation, tiger_handle)
acts_2 = measure_time("infer act 2", model2.infer_action, obs_2, None)
measure_time("set act 2", env.set_action, tiger_handle, acts_2)
# simulate one step
done = measure_time("step", env.step)
# get reward
rewards = measure_time("get reward", env.get_reward, tiger_handle)
total_reward += sum(rewards)
measure_time("clear", env.clear_dead)
step_time = time.time() - start_time
if i >= skip:
total_time += step_time
print("all time: %.2f\n" % (step_time))
# print info
print("number of deer: %d" % env.get_num(deer_handle))
print("number of tiger: %d" % env.get_num(tiger_handle))
print("total reward: %d" % total_reward)
if done:
print("game over")
break
print("FPS", (n_step - skip) / total_time)
| 4,111 | 30.630769 | 104 | py |
MAgent | MAgent-master/scripts/test/test_examples.py | """test examples"""
import os
import time
source = [
"examples/train_tiger.py",
"examples/train_pursuit.py",
"examples/train_gather.py",
"examples/train_battle.py",
"examples/train_single.py",
"examples/train_arrange.py",
"examples/train_multi.py",
]
def do_cmd(cmd):
tic = time.time()
print(cmd)
assert os.system(cmd) == 0
return time.time() - tic
start = time.time()
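# Each example is run once with the TensorFlow models, then switched to the MXNet models
# via sed (editing the example in place) and run again.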
for item in source:
run_cmd = "python %s --train --n_round 1" % item
do_cmd(run_cmd)
change_cmd = "sed -i 's/tf_model/mx_model/g' %s" % item
do_cmd(change_cmd)
do_cmd(run_cmd)
print("test examples done", time.time() - start)
| 667 | 18.085714 | 59 | py |
MAgent | MAgent-master/scripts/test/search.py | """do search task"""
import os
import sys
import argparse
import time
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def do_task(task_item):
recorder = open(task_item["name"] + "-rec.out", "w")
for value in task_item["arg_value"]:
tmp_name = task_item["name"] + "-" + value
cmd = " ".join([task_item["prefix"], task_item["arg_name"], value])
# cmd = "time " + cmd
cmd += " --name " + tmp_name
cmd = cmd + " >> " + tmp_name + ".out"
print("%s : %s" % (tmp_name, cmd))
start = time.time()
os.system(cmd)
use_time = time.time() - start
recorder.write("log_file: %s\t time: %.2f\n" % (tmp_name + ".log", use_time))
recorder.close()
| 719 | 26.692308 | 85 | py |
MAgent | MAgent-master/scripts/test/test_against.py | """test baselines in battle against"""
from search import do_task
task = [
{
"name": "against",
"type": "single-search",
"prefix": "python examples/train_against.py --train --save_every 100 --n_round 500",
"arg_name": "--alg",
"arg_value": ["a2c", "drqn", "dqn"]
}
]
for item in task:
do_task(item)
print("%s done" % item['name'])
print("battle-against all done")
print("plot curve: python scripts/plot_many.py against-rec.out num 0")
| 496 | 21.590909 | 92 | py |
MAgent | MAgent-master/scripts/test/test_fps.py | """test fps"""
import os
import sys
import magent
import argparse
if len(sys.argv) < 2:
    print("usage: python test_fps.py --max_gpu N --frame tf|mx [--name NAME]")
parser = argparse.ArgumentParser()
parser.add_argument("--max_gpu", type=int, default=0)
parser.add_argument("--frame", type=str, default='tf')
parser.add_argument("--name", type=str, default="fps")
args = parser.parse_args()
tmp_name = 'tmp-' + args.name
max_gpu = args.max_gpu
framework = args.frame
number = [1000, 10000, 100000, 1000000]
gpus = range(max_gpu+1)
ret = []
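# Sweep over agent counts and GPU counts; n_step is chosen so every configuration simulates
# roughly the same total number of agent-steps, which keeps the FPS numbers comparable.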
for n in number:
row = []
for g in gpus:
        n_step = 30000000 // n
cmd = ("python scripts/test/test_1m.py --n_step %d --agent_number %d --num_gpu %d --frame %s > /dev/shm/aha "
"&& cat /dev/shm/aha | grep FPS > %s" % (n_step, n, g, framework, tmp_name))
if n < 1000000:
cmd = 'OMP_NUM_THREADS=8 ' + cmd
else:
cmd = 'OMP_NUM_THREADS=16 ' + cmd
print(cmd)
os.system(cmd)
with open(tmp_name) as fin:
line = fin.readline()
x = eval(line)[1]
row.append(x)
print(x)
ret.append(row)
for row in ret:
print(magent.round(row))
| 1,192 | 23.346939 | 117 | py |
MAgent | MAgent-master/scripts/test/test_tiger.py | """test baselines in double attack"""
from search import do_task
task = [
{
"name": "tiger",
"type": "single-search",
"prefix": "python examples/train_tiger.py --train --n_round 250",
"arg_name": "--alg",
"arg_value": ["dqn", "a2c", "drqn"]
}
]
for item in task:
do_task(item)
print("%s done" % item['name'])
print("tiger all done")
print("plot curve: python scripts/plot_many.py tiger-rec.out reward")
| 463 | 21.095238 | 73 | py |
filtered-sliced-optimal-transport | filtered-sliced-optimal-transport-main/render_two_class_pointset.py | import numpy as np
import matplotlib.pyplot as plt
import sys
# Create data
colors = (0,0,0)
area = np.pi*3*4*4*4
x = np.zeros([65536]) # max size of the pointset to load
y = np.zeros([65536])
f = open(str(sys.argv[1]), "r")
u = 0
for t in f:
line = t.split()
x[int(u)] = float(line[0])
y[int(u)] = float(line[1])
u += 1
print(u)
fig = plt.figure(figsize=(int(np.sqrt(u)/2), int(np.sqrt(u)/2)), dpi=80)
ax = fig.add_subplot(1, 1, 1)
plt.scatter(x[0:int(u/2)], y[0:int(u/2)], s=area, c=(1, 0, 0), alpha=1.0)
plt.scatter(x[int(u/2):u], y[int(u/2):u], s=area, c=(0, 0, 1), alpha=1.0)
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
fig.tight_layout()
plt.savefig(str(sys.argv[2]))
| 861 | 20.55 | 73 | py |
filtered-sliced-optimal-transport | filtered-sliced-optimal-transport-main/render_stippling.py | import numpy as np
import matplotlib.pyplot as plt
import sys
import os
import cv2 as cv
from matplotlib.offsetbox import TextArea, DrawingArea, OffsetImage, AnnotationBbox
import matplotlib.image as mpimg
x = np.zeros([32*32*4*4*4*4])
y = np.zeros([32*32*4*4*4*4])
f = open(str(sys.argv[1]), "r")
area = int(np.pi*3*4*2*3*2*2)
count = 0
for t in f:
line = t.split()
x[int(count)] = float(line[0])
y[int(count)] = float(line[1])
count += 1
input_img = cv.imread(str(sys.argv[3]))
aspect_ratio = input_img.shape[0]/float(input_img.shape[1])
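# Size the figure so its aspect ratio matches the input image and its area scales with the
# number of points being plotted.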
fig = plt.figure(figsize=(int(np.sqrt(count)/1.6), int(np.sqrt(count)*aspect_ratio/1.6)), dpi=80)
if(aspect_ratio < 1):
fig = plt.figure(figsize=(int(np.sqrt(count)*(1.0/aspect_ratio)/1.6), int(np.sqrt(count)/1.6)), dpi=80)
ax = fig.add_subplot(1, 1, 1)
plt.scatter(x[:count], y[:count],s=area, c=(0,0,0))
#ax.set_xlim([0.0, 1.0])
#ax.set_ylim([0.0, 1.0])
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
fig.tight_layout()
plt.savefig(str(sys.argv[2]),bbox_inches='tight',pad_inches = 0)
| 1,204 | 27.023256 | 107 | py |
filtered-sliced-optimal-transport | filtered-sliced-optimal-transport-main/render_progressive_pointset.py | import numpy as np
import matplotlib.pyplot as plt
import sys
# Create data
colors = (0,0,0)
area = np.pi*3*4*4*4
x = np.zeros([65536]) # max size of the pointset to load
y = np.zeros([65536])
f = open(str(sys.argv[1]), "r")
u = 0
for t in f:
line = t.split()
x[int(u)] = float(line[0])
y[int(u)] = float(line[1])
u += 1
print(u)
fig = plt.figure(figsize=(int(np.sqrt(u)/2), int(np.sqrt(u)/2)), dpi=80)
ax = fig.add_subplot(1, 1, 1)
# split the point set into the specified number of progressive subsets, one color per subset
nb_div = int(sys.argv[3])
color_grad = range(0,nb_div)
color_grad = [(a/nb_div,0,1.0-a/nb_div) for a in color_grad]
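# Colors interpolate linearly from blue (earliest subset) to red (latest subset).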
for subiv in range(0,nb_div):
plt.scatter(x[int((subiv)*u/nb_div):int((subiv+1)*u/nb_div)], y[int((subiv)*u/nb_div):int((subiv+1)*u/nb_div)], s=area, c=color_grad[subiv], alpha=1.0)
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
fig.tight_layout()
plt.savefig(str(sys.argv[2]))
| 1,055 | 22.466667 | 155 | py |
filtered-sliced-optimal-transport | filtered-sliced-optimal-transport-main/render_color_img.py | import numpy as np
import matplotlib.pyplot as plt
import sys
import cv2 as cv
# Create data
colors = (0,0,0)
area = int(np.pi*3*4*2*3*2*2)
x = np.zeros([65536])
y = np.zeros([65536])
f = open(str(sys.argv[1]), "r")
u = 0
for t in f:
line = t.split()
x[int(u)] = float(line[0])
y[int(u)] = float(line[1])
u += 1
print(u)
input_img = cv.imread(str(sys.argv[3]))
aspect_ratio = input_img.shape[0]/float(input_img.shape[1])
fig = plt.figure(figsize=(int(np.sqrt(u)/1.6), int(np.sqrt(u)*aspect_ratio/1.6)), dpi=80)
if(aspect_ratio < 1):
fig = plt.figure(figsize=(int(np.sqrt(u)*(1.0/aspect_ratio)/1.6), int(np.sqrt(u)/1.6)), dpi=80)
ax = fig.add_subplot(1, 1, 1)
plt.scatter(x[0:int(u*float(sys.argv[4]))], y[0:int(u*float(sys.argv[4]))], s=area, c=(0,1,1), alpha=1.0)
plt.scatter(x[int(u*float(sys.argv[4])):int(u*float(sys.argv[5]))], y[int(u*float(sys.argv[4])):int(u*float(sys.argv[5]))], s=area, c=(1.0,0.0,1.0), alpha=1.0)
plt.scatter(x[int(u*float(sys.argv[5])):int(u*float(sys.argv[6]))], y[int(u*float(sys.argv[5])):int(u*float(sys.argv[6]))], s=area, c=(1.0,1.0,0.0), alpha=1.0)
plt.scatter(x[int(u*float(sys.argv[6])):int(u*float(sys.argv[7]))], y[int(u*float(sys.argv[6])):int(u*float(sys.argv[7]))], s=area, c=(0.0,0.0,0.0), alpha=1.0)
#ax.set_xlim([0.0, 0.7])
#ax.set_ylim([0.0, 1.0])
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
plt.axis('off')
fig.tight_layout()
plt.savefig(str(sys.argv[2]))
| 1,599 | 29.188679 | 159 | py |
mtenv | mtenv-main/setup.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# type: ignore
import codecs
import os.path
import subprocess
from pathlib import Path
import setuptools
def read(rel_path):
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, rel_path), "r") as fp:
return fp.read()
def get_version(rel_path):
for line in read(rel_path).splitlines():
if line.startswith("__version__"):
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
raise RuntimeError("Unable to find version string.")
def parse_dependency(filepath):
dep_list = []
for dep in open(filepath).read().splitlines():
if dep.startswith("#"):
continue
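        # Rewrite "git+https://...#egg=<name>" style pins as "<name> @ git+https://..."
        # (the PEP 508 direct-reference form understood by setuptools).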
key = "#egg="
if key in dep:
git_link, egg_name = dep.split(key)
dep = f"{egg_name} @ {git_link}"
dep_list.append(dep)
return dep_list
base_requirements = parse_dependency("requirements/base.txt")
dev_requirements = base_requirements + parse_dependency("requirements/dev.txt")
extras_require = {}
for setup_path in Path("mtenv/envs").glob("**/setup.py"):
env_path = setup_path.parent
env_name = (
subprocess.run(["python", setup_path, "--name"], stdout=subprocess.PIPE)
.stdout.decode()
.strip()
)
extras_require[env_name] = base_requirements + parse_dependency(
f"{str(env_path)}/requirements.txt"
)
extras_require["all"] = list(
set([dep for requirements in extras_require.values() for dep in requirements])
)
extras_require["dev"] = dev_requirements
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="mtenv",
version=get_version("mtenv/__init__.py"),
author="Shagun Sodhani, Ludovic Denoyer, Pierre-Alexandre Kamienny, Olivier Delalleau",
author_email="[email protected], [email protected], [email protected], [email protected]",
description="MTEnv: MultiTask Environments for Reinforcement Learning",
    long_description=long_description,
long_description_content_type="text/markdown",
install_requires=base_requirements,
url="https://github.com/facbookresearch/mtenv",
packages=setuptools.find_packages(
exclude=["*.tests", "*.tests.*", "tests.*", "tests", "docs", "docsrc"]
),
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.6",
extras_require=extras_require,
)
| 2,743 | 30.54023 | 98 | py |
mtenv | mtenv-main/noxfile.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# type: ignore
import base64
import os
from pathlib import Path
from typing import List, Set
import nox
from nox.sessions import Session
DEFAULT_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"]
PYTHON_VERSIONS = os.environ.get(
"NOX_PYTHON_VERSIONS", ",".join(DEFAULT_PYTHON_VERSIONS)
).split(",")
def setup_env(session: Session, name: str) -> None:
env = {}
if name in ["metaworld"]:
key = "CIRCLECI_MJKEY"
if key in os.environ:
# job is running in CI
env[
"LD_LIBRARY_PATH"
] = "$LD_LIBRARY_PATH:/home/circleci/.mujoco/mujoco200/bin"
session.install(f".[{name}]", env=env)
def setup_mtenv(session: Session) -> None:
key = "CIRCLECI_MJKEY"
if key in os.environ:
# job is running in CI
mjkey = base64.b64decode(os.environ[key]).decode("utf-8")
mjkey_path = "/home/circleci/.mujoco/mjkey.txt"
with open(mjkey_path, "w") as f:
# even if the mjkey exists, we can safely overwrite it.
for line in mjkey:
f.write(line)
session.install("--upgrade", "setuptools", "pip")
session.install(".[dev]")
def get_core_paths(root: str) -> List[str]:
"""Return all the files/directories that are part of core package.
In practice, it just excludes the directories in env module"""
paths = []
for _path in Path(root).iterdir():
if _path.stem == "envs":
for _env_path in _path.iterdir():
if _env_path.is_file():
paths.append(str(_env_path))
else:
paths.append(str(_path))
return paths
class EnvSetup:
def __init__(
self, name: str, setup_path: Path, supported_python_versions: Set[str]
) -> None:
self.name = name
self.setup_path = str(setup_path)
self.path = str(setup_path.parent)
self.supported_python_versions = supported_python_versions
def parse_setup_file(session: Session, setup_path: Path) -> EnvSetup:
command = ["python", str(setup_path), "--name", "--classifiers"]
classifiers = session.run(*command, silent=True).splitlines()
name = classifiers[0]
python_version_string = "Programming Language :: Python :: "
supported_python_versions = {
stmt.replace(python_version_string, "")
for stmt in classifiers[1:]
if python_version_string in stmt
}
return EnvSetup(
name=name,
setup_path=setup_path,
supported_python_versions=supported_python_versions,
)
def get_all_envsetups(session: Session) -> List[EnvSetup]:
return [
parse_setup_file(session=session, setup_path=setup_path)
for setup_path in Path("mtenv/envs").glob("**/setup.py")
]
def get_all_env_setup_paths_as_nox_params():
return [
nox.param(setup_path, id=setup_path.parent.stem)
for setup_path in Path("mtenv/envs").glob("**/setup.py")
]
def get_supported_envsetups(session: Session) -> List[EnvSetup]:
"""Get the list of EnvSetups that can run in a given session."""
return [
env_setup
for env_setup in get_all_envsetups(session=session)
if session.python in env_setup.supported_python_versions
]
def get_supported_env_paths(session: Session) -> List[str]:
"""Get the list of env_paths that can run in a given session."""
return [env_setup.path for env_setup in get_supported_envsetups(session=session)]
@nox.session(python=PYTHON_VERSIONS)
def lint(session: Session) -> None:
setup_mtenv(session=session)
for _path in (
get_core_paths(root="mtenv")
+ get_core_paths(root="tests")
+ get_supported_env_paths(session=session)
):
session.run("black", "--check", _path)
session.run("flake8", _path)
@nox.session(python=PYTHON_VERSIONS)
def mypy(session: Session) -> None:
setup_mtenv(session=session)
for _path in get_core_paths(root="mtenv"):
session.run("mypy", "--strict", _path)
for envsetup in get_supported_envsetups(session=session):
setup_env(session=session, name=envsetup.name)
session.run("mypy", envsetup.path)
@nox.session(python=PYTHON_VERSIONS)
def test_wrappers(session) -> None:
setup_mtenv(session=session)
session.run("pytest", "tests/wrappers")
@nox.session(python=PYTHON_VERSIONS)
def test_examples(session) -> None:
setup_mtenv(session=session)
session.run("pytest", "tests/examples")
@nox.session(python=PYTHON_VERSIONS)
@nox.parametrize("env_setup_path", get_all_env_setup_paths_as_nox_params())
def test_envs(session, env_setup_path) -> None:
setup_mtenv(session=session)
envsetup = parse_setup_file(session=session, setup_path=env_setup_path)
if session.python not in envsetup.supported_python_versions:
print(f"Python {session.python} is not supported by {envsetup.name}")
return
setup_env(session=session, name=envsetup.name)
env = {"NOX_MTENV_ENV_PATH": envsetup.path}
command_for_headless_rendering = [
"xvfb-run",
"-a",
"-s",
"-screen 0 1024x768x24 -ac +extension GLX +render -noreset",
]
commands = []
key = "CIRCLECI_MJKEY"
if key in os.environ and envsetup.name in ["metaworld"]:
env["LD_LIBRARY_PATH"] = "$LD_LIBRARY_PATH:/home/circleci/.mujoco/mujoco200/bin"
if envsetup.name.startswith("MT-HiPBMDP"):
env["PYTHONPATH"] = "mtenv/envs/hipbmdp/local_dm_control_suite"
if envsetup.name in ["hipbmdp", "mpte"]:
commands = commands + command_for_headless_rendering
commands = commands + ["pytest", "tests/envs"]
session.run(*commands, env=env)
| 5,746 | 31.653409 | 88 | py |
mtenv | mtenv-main/examples/wrapped_bandit.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import List, Optional
from gym import spaces
from examples.bandit import BanditEnv # type: ignore[import]
from mtenv.utils import seeding
from mtenv.utils.types import TaskObsType, TaskStateType
from mtenv.wrappers.env_to_mtenv import EnvToMTEnv
class MTBanditWrapper(EnvToMTEnv):
def set_task_observation(self, task_obs: TaskObsType) -> None:
self._task_obs = task_obs
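        # Changing the task amounts to overwriting the wrapped single-task bandit's
        # per-arm reward probabilities.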
self.env.reward_probability = self._task_obs
self._is_task_seed_set = False
def get_task_state(self) -> TaskStateType:
return self._task_obs
def set_task_state(self, task_state: TaskStateType) -> None:
self._task_obs = task_state
self.env.reward_probability = self._task_obs
def sample_task_state(self) -> TaskStateType:
"""Sample a `task_state` that contains all the information needed to revert to any
other task. For examples, refer to TBD"""
return self.observation_space["task_obs"].sample()
def seed_task(self, seed: Optional[int] = None) -> List[int]:
"""Set the seed for task information"""
self._is_task_seed_set = True
_, seed = seeding.np_random(seed)
self.observation_space["task_obs"].seed(seed)
return [seed]
def assert_task_seed_is_set(self) -> None:
"""Check that the task seed is set."""
assert self._is_task_seed_set, "please call `seed_task()` first"
def run() -> None:
n_arms = 5
env = MTBanditWrapper(
env=BanditEnv(n_arms),
task_observation_space=spaces.Box(low=0.0, high=1.0, shape=(n_arms,)),
)
env.seed(1)
env.seed_task(seed=2)
for task in range(3):
print("=== task " + str(task))
env.reset_task_state()
print(env.reset())
for _ in range(5):
action = env.action_space.sample()
print(env.step(action))
print(f"reward_probability: {env.unwrapped.reward_probability}")
if __name__ == "__main__":
run()
| 2,051 | 32.096774 | 90 | py |
mtenv | mtenv-main/examples/finite_mtenv_bandit.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Any, Dict, List, Optional
import numpy as np
from gym import spaces
from mtenv import MTEnv
from mtenv.utils import seeding
from mtenv.utils.types import ActionType, ObsType, StepReturnType
TaskStateType = int
class FiniteMTBanditEnv(MTEnv):
"""Multitask Bandit Env where the task_state is sampled from a finite list of states"""
def __init__(self, n_tasks: int, n_arms: int):
super().__init__(
action_space=spaces.Discrete(n_arms),
env_observation_space=spaces.Box(
low=0.0, high=1.0, shape=(1,), dtype=np.float32
),
task_observation_space=spaces.Box(low=0.0, high=1.0, shape=(n_arms,)),
)
self.n_arms = n_arms
self.n_tasks = n_tasks
self.observation_space["task_obs"].seed(0)
self.possible_task_observations = np.asarray(
[self.observation_space["task_obs"].sample() for _ in range(self.n_tasks)]
)
# possible_task_observations is assumed to be part of the environment definition ie
# everytime we instantiate the env, we get the same `possible_task_observations`.
self._should_reset_env = True
def reset(self, **kwargs: Dict[str, Any]) -> ObsType:
self.assert_env_seed_is_set()
self._should_reset_env = False
return {"env_obs": [0.0], "task_obs": self.task_obs}
def sample_task_state(self) -> TaskStateType:
"""Sample a `task_state` that contains all the information needed to revert to any
other task. For examples, refer to TBD"""
self.assert_task_seed_is_set()
# The assert statement (at the start of the function) ensures that self.np_random_task
# is not None. Mypy is raising the warning incorrectly.
return self.np_random_task.randint(0, self.n_tasks) # type: ignore[no-any-return, union-attr]
def set_task_state(self, task_state: TaskStateType) -> None:
self.task_state = task_state
self.task_obs = self.possible_task_observations[task_state]
def step(self, action: ActionType) -> StepReturnType:
if self._should_reset_env:
raise RuntimeError("Call `env.reset()` before calling `env.step()`")
        # `reset()` (which asserts that the env seed is set) must be called before `step()`,
        # so self.np_random_env is guaranteed to be not None here; mypy cannot infer this.
        # The reward is a Bernoulli draw with success probability task_obs[action].
sample = self.np_random_env.rand() # type: ignore[union-attr]
reward = 0.0
if sample < self.task_obs[action]: # type: ignore[index]
reward = 1.0
return (
{"env_obs": [0.0], "task_obs": self.task_obs},
reward,
False,
{},
)
def seed_task(self, seed: Optional[int] = None) -> List[int]:
"""Set the seed for task information"""
self.np_random_task, seed = seeding.np_random(seed)
# in this function, we do not need the self.np_random_task
return [seed]
def get_task_state(self) -> TaskStateType:
"""Return all the information needed to execute the current task again.
For examples, refer to TBD"""
return self.task_state
def run() -> None:
env = FiniteMTBanditEnv(n_tasks=10, n_arms=5)
env.seed(seed=1)
env.seed_task(seed=2)
for task in range(3):
print("=== Task " + str(task % 2))
env.set_task_state(task % 2)
print(env.reset())
for _ in range(5):
action = env.action_space.sample()
print(env.step(action))
new_env = FiniteMTBanditEnv(n_tasks=10, n_arms=5)
new_env.seed(seed=1)
new_env.seed_task(seed=2)
print("=== Executing the current task (from old env) in new env ")
new_env.set_task_state(task_state=env.get_task_state())
print(new_env.reset())
for _ in range(5):
action = new_env.action_space.sample()
print(new_env.step(action))
if __name__ == "__main__":
run()
| 4,016 | 35.518182 | 102 | py |
mtenv | mtenv-main/examples/bandit.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import List, Optional, Tuple
import numpy as np
from gym import spaces
from gym.core import Env
from mtenv.utils import seeding
from mtenv.utils.types import ActionType, DoneType, EnvObsType, InfoType, RewardType
StepReturnType = Tuple[EnvObsType, RewardType, DoneType, InfoType]
class BanditEnv(Env): # type: ignore[misc]
# Class cannot subclass 'Env' (has type 'Any')
def __init__(self, n_arms: int):
self.n_arms = n_arms
self.action_space = spaces.Discrete(n_arms)
self.observation_space = spaces.Box(
low=0.0, high=1.0, shape=(1,), dtype=np.float32
)
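        # Each arm pays a reward of 1 with its own fixed probability, drawn once per
        # environment instance.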
self.reward_probability = spaces.Box(
low=0.0, high=1.0, shape=(self.n_arms,)
).sample()
def seed(self, seed: Optional[int] = None) -> List[int]:
self.np_random_env, seed = seeding.np_random(seed)
assert isinstance(seed, int)
return [seed]
def reset(self) -> EnvObsType:
return np.asarray([0.0])
def step(self, action: ActionType) -> StepReturnType:
sample = self.np_random_env.rand()
reward = 0.0
if sample < self.reward_probability[action]:
reward = 1.0
return np.asarray([0.0]), reward, False, {}
def run() -> None:
env = BanditEnv(5)
env.seed(seed=5)
for episode in range(3):
print("=== episode " + str(episode))
print(env.reset())
for _ in range(5):
action = env.action_space.sample()
print(env.step(action))
if __name__ == "__main__":
run()
| 1,632 | 27.649123 | 84 | py |
mtenv | mtenv-main/examples/mtenv_bandit.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import numpy as np
from gym import spaces
from mtenv import MTEnv
from mtenv.utils.types import ActionType, ObsType, StepReturnType, TaskStateType
class MTBanditEnv(MTEnv):
def __init__(self, n_arms: int):
super().__init__(
action_space=spaces.Discrete(n_arms),
env_observation_space=spaces.Box(
low=0.0, high=1.0, shape=(1,), dtype=np.float32
),
task_observation_space=spaces.Box(low=0.0, high=1.0, shape=(n_arms,)),
)
self.n_arms = n_arms
self._should_reset_env = True
def reset(self) -> ObsType:
self.assert_env_seed_is_set()
self._should_reset_env = False
return {"env_obs": [0.0], "task_obs": self.task_observation}
def sample_task_state(self) -> TaskStateType:
self.assert_task_seed_is_set()
return self.observation_space["task_obs"].sample()
def get_task_state(self) -> TaskStateType:
return self.task_observation
def set_task_state(self, task_state: TaskStateType) -> None:
self.task_observation = task_state
def step(self, action: ActionType) -> StepReturnType:
if self._should_reset_env:
raise RuntimeError("Call `env.reset()` before calling `env.step()`")
        # `reset()` (which asserts that the env seed is set) must be called before `step()`,
        # so self.np_random_env is guaranteed to be not None here; mypy cannot infer this.
sample = self.np_random_env.rand() # type: ignore[union-attr]
reward = 0.0
if sample < self.task_observation[action]:
reward = 1.0
return (
{"env_obs": [0.0], "task_obs": self.task_observation},
reward,
False,
{},
)
def run() -> None:
env = MTBanditEnv(5)
env.seed(seed=1)
env.seed_task(seed=2)
for task in range(3):
print("=== Task " + str(task))
env.reset_task_state()
print(env.reset())
for _ in range(5):
action = env.action_space.sample()
print(env.step(action))
if __name__ == "__main__":
run()
| 2,186 | 29.802817 | 94 | py |
mtenv | mtenv-main/tests/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
| 71 | 35 | 70 | py |
mtenv | mtenv-main/tests/envs/registered_env_test.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, Iterable, List, Tuple
import pytest
from mtenv import make
from mtenv.envs.registration import MultitaskEnvSpec, mtenv_registry
from tests.utils.utils import validate_mtenv
ConfigType = Dict[str, Any]
def get_env_spec() -> Iterable[Tuple[str, MultitaskEnvSpec]]:
mtenv_env_path = os.environ.get("NOX_MTENV_ENV_PATH", "")
if mtenv_env_path == "":
# test all envs
return mtenv_registry.env_specs.items()
else:
# test only those environments which are on NOX_MTENV_ENV_PATH
mtenv_env_path = str(Path(mtenv_env_path).resolve())
env_specs = deepcopy(mtenv_registry.env_specs)
for key in list(env_specs.keys()):
entry_point = env_specs[key].entry_point.split(":")[0].replace(".", "/")
if mtenv_env_path not in str(Path(entry_point).resolve()):
env_specs.pop(key)
return env_specs.items()
def get_test_kwargs_from_spec(spec: MultitaskEnvSpec, key: str) -> List[Dict[str, Any]]:
if spec.test_kwargs and key in spec.test_kwargs:
return spec.test_kwargs[key]
else:
return []
def get_configs(get_valid_env_args: bool) -> List[Tuple[ConfigType, ConfigType]]:
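    # Build (env_kwargs, test_kwargs) pairs for every registered environment, using either the
    # declared valid or invalid kwargs depending on `get_valid_env_args`.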
configs = []
key = "valid_env_kwargs" if get_valid_env_args else "invalid_env_kwargs"
for env_name, spec in get_env_spec():
test_config = deepcopy(spec.test_kwargs)
for key_to_pop in ["valid_env_kwargs", "invalid_env_kwargs"]:
if key_to_pop in test_config:
test_config.pop(key_to_pop)
for params in get_test_kwargs_from_spec(spec, key):
env_config = deepcopy(params)
env_config["id"] = env_name
configs.append((env_config, deepcopy(test_config)))
if get_valid_env_args:
env_config = deepcopy(spec.kwargs)
env_config["id"] = env_name
configs.append((env_config, deepcopy(test_config)))
return configs
@pytest.mark.parametrize(
"env_config, test_config", get_configs(get_valid_env_args=True)
)
def test_registered_env_with_valid_input(env_config, test_config):
env = make(**env_config)
validate_mtenv(env=env, **test_config)
@pytest.mark.parametrize(
"env_config, test_config", get_configs(get_valid_env_args=False)
)
def test_registered_env_with_invalid_input(env_config, test_config):
with pytest.raises(Exception):
env = make(**env_config)
validate_mtenv(env=env, **test_config)
| 2,599 | 33.666667 | 88 | py |
mtenv | mtenv-main/tests/envs/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
| 71 | 35 | 70 | py |
mtenv | mtenv-main/tests/examples/bandit_test.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import List
import pytest
from examples.bandit import BanditEnv # noqa: E402
from tests.utils.utils import validate_single_task_env
def get_valid_n_arms() -> List[int]:
return [1, 10, 100]
def get_invalid_n_arms() -> List[int]:
return [-1, 0]
@pytest.mark.parametrize("n_arms", get_valid_n_arms())
def test_n_arm_bandit_with_valid_input(n_arms):
env = BanditEnv(n_arms=n_arms)
env.seed(seed=5)
validate_single_task_env(env)
@pytest.mark.parametrize("n_arms", get_invalid_n_arms())
def test_n_arm_bandit_with_invalid_input(n_arms):
with pytest.raises(Exception):
env = BanditEnv(n_arms=n_arms)
env.seed(seed=5)
validate_single_task_env(env)
| 784 | 22.787879 | 70 | py |
mtenv | mtenv-main/tests/examples/wrapped_bandit_test.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import List
import pytest
from gym import spaces
from examples.bandit import BanditEnv # noqa: E402
from examples.wrapped_bandit import MTBanditWrapper # noqa: E402
from tests.utils.utils import validate_mtenv
def get_valid_n_arms() -> List[int]:
return [1, 10, 100]
def get_invalid_n_arms() -> List[int]:
return [-1, 0]
@pytest.mark.parametrize("n_arms", get_valid_n_arms())
def test_ntasks_id_wrapper_with_valid_input(n_arms):
env = MTBanditWrapper(
env=BanditEnv(n_arms),
task_observation_space=spaces.Box(low=0.0, high=1.0, shape=(n_arms,)),
)
validate_mtenv(env=env)
@pytest.mark.parametrize("n_arms", get_invalid_n_arms())
def test_ntasks_id_wrapper_with_invalid_input(n_arms):
with pytest.raises(Exception):
env = MTBanditWrapper(
env=BanditEnv(n_arms),
task_observation_space=spaces.Box(low=0.0, high=1.0, shape=(n_arms,)),
)
validate_mtenv(env=env)
| 1,043 | 25.769231 | 82 | py |
mtenv | mtenv-main/tests/examples/mtenv_bandit_test.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import List
import pytest
from examples.mtenv_bandit import MTBanditEnv # noqa: E402
from tests.utils.utils import validate_mtenv
def get_valid_n_arms() -> List[int]:
return [1, 10, 100]
def get_invalid_n_arms() -> List[int]:
return [-1, 0]
@pytest.mark.parametrize("n_arms", get_valid_n_arms())
def test_ntasks_id_wrapper_with_valid_input(n_arms):
env = MTBanditEnv(n_arms=n_arms)
validate_mtenv(env=env)
@pytest.mark.parametrize("n_arms", get_invalid_n_arms())
def test_ntasks_id_wrapper_with_invalid_input(n_arms):
with pytest.raises(Exception):
env = MTBanditEnv(n_arms=n_arms)
validate_mtenv(env=env)
| 736 | 24.413793 | 70 | py |
mtenv | mtenv-main/tests/examples/finite_mtenv_bandit_test.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import List, Tuple
import pytest
from examples.finite_mtenv_bandit import FiniteMTBanditEnv # noqa: E402
from tests.utils.utils import validate_mtenv
def get_valid_n_tasks_and_arms() -> List[Tuple[int, int]]:
return [(1, 2), (10, 20), (100, 200)]
def get_invalid_n_tasks_and_arms() -> List[Tuple[int, int]]:
return [(-1, 2), (0, 3), (1, -2), (3, 0)]
@pytest.mark.parametrize("n_tasks, n_arms", get_valid_n_tasks_and_arms())
def test_mtenv_bandit_with_valid_input(n_tasks, n_arms):
env = FiniteMTBanditEnv(n_tasks=n_tasks, n_arms=n_arms)
validate_mtenv(env=env)
@pytest.mark.parametrize("n_tasks, n_arms", get_invalid_n_tasks_and_arms())
def test_mtenv_bandit_with_invalid_input(n_tasks, n_arms):
with pytest.raises(Exception):
env = FiniteMTBanditEnv(n_tasks=n_tasks, n_arms=n_arms)
validate_mtenv(env=env)
| 906 | 30.275862 | 75 | py |
mtenv | mtenv-main/tests/examples/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
| 71 | 35 | 70 | py |
mtenv | mtenv-main/tests/wrappers/ntasks_test.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import List
import pytest
from mtenv.envs.control.cartpole import MTCartPole
from mtenv.wrappers.ntasks import NTasks as NTasksWrapper
from tests.utils.utils import validate_mtenv
def get_valid_num_tasks() -> List[int]:
return [1, 10, 100]
def get_invalid_num_tasks() -> List[int]:
return [-1, 0]
@pytest.mark.parametrize("n_tasks", get_valid_num_tasks())
def test_ntasks_wrapper_with_valid_input(n_tasks):
env = MTCartPole()
env = NTasksWrapper(env, n_tasks=n_tasks)
validate_mtenv(env=env)
@pytest.mark.parametrize("n_tasks", get_invalid_num_tasks())
def test_ntasks_wrapper_with_invalid_input(n_tasks):
with pytest.raises(Exception):
env = MTCartPole()
env = NTasksWrapper(env, n_tasks=n_tasks)
validate_mtenv(env=env)
| 865 | 24.470588 | 70 | py |
mtenv | mtenv-main/tests/wrappers/ntasks_id_test.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import List
import pytest
from mtenv.envs.control.cartpole import MTCartPole
from mtenv.wrappers.ntasks_id import NTasksId as NTasksIdWrapper
from tests.utils.utils import validate_mtenv
def get_valid_num_tasks() -> List[int]:
return [1, 10, 100]
def get_invalid_num_tasks() -> List[int]:
return [-1, 0]
@pytest.mark.parametrize("n_tasks", get_valid_num_tasks())
def test_ntasks_id_wrapper_with_valid_input(n_tasks):
env = MTCartPole()
env = NTasksIdWrapper(env, n_tasks=n_tasks)
validate_mtenv(env=env)
@pytest.mark.parametrize("n_tasks", get_invalid_num_tasks())
def test_ntasks_id_wrapper_with_invalid_input(n_tasks):
with pytest.raises(Exception):
env = MTCartPole()
env = NTasksIdWrapper(env, n_tasks=n_tasks)
validate_mtenv(env=env)
| 882 | 24.970588 | 70 | py |
mtenv | mtenv-main/tests/wrappers/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
| 71 | 35 | 70 | py |
mtenv | mtenv-main/tests/utils/utils.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Tuple
import gym
import numpy as np
from mtenv import MTEnv
from mtenv.utils.types import (
DoneType,
EnvObsType,
InfoType,
ObsType,
RewardType,
StepReturnType,
)
StepReturnTypeSingleEnv = Tuple[EnvObsType, RewardType, DoneType, InfoType]
def validate_obs_type(obs: ObsType):
assert isinstance(obs, dict)
assert "env_obs" in obs
assert "task_obs" in obs
def validate_step_return_type(step_return: StepReturnType):
obs, reward, done, info = step_return
validate_obs_type(obs)
assert isinstance(reward, (float, int))
assert isinstance(done, bool)
assert isinstance(info, dict)
def validate_obs_type_single_env(obs: EnvObsType):
    assert isinstance(obs, np.ndarray)
def validate_step_return_type_single_env(step_return: StepReturnTypeSingleEnv):
    obs, reward, done, info = step_return
    validate_obs_type_single_env(obs)
assert isinstance(reward, float)
assert isinstance(done, bool)
assert isinstance(info, dict)
def validate_mtenv(env: MTEnv) -> None:
env.seed(5)
env.assert_env_seed_is_set()
env.seed_task(15)
env.assert_task_seed_is_set()
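    # Sample a handful of fresh tasks and take a few steps in each one to exercise the
    # full multitask API surface.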
for _env_index in range(10):
env.reset_task_state()
obs = env.reset()
validate_obs_type(obs)
for _step_index in range(3):
action = env.action_space.sample()
step_return = env.step(action)
validate_step_return_type(step_return)
def validate_single_task_env(env: gym.Env) -> None:
for _episode in range(10):
obs = env.reset()
        validate_obs_type_single_env(obs)
for _ in range(3):
action = env.action_space.sample()
step_return = env.step(action)
validate_step_return_type_single_env(step_return)
| 1,854 | 25.5 | 75 | py |
mtenv | mtenv-main/mtenv/core.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""Core API of MultiTask Environments for Reinforcement Learning."""
from abc import ABC, abstractmethod
from typing import List, Optional
from gym.core import Env
from gym.spaces.dict import Dict as DictSpace
from gym.spaces.space import Space
from numpy.random import RandomState
from mtenv.utils import seeding
from mtenv.utils.types import (
ActionType,
ObsType,
StepReturnType,
TaskObsType,
TaskStateType,
)
class MTEnv(Env, ABC): # type: ignore[misc]
def __init__(
self,
action_space: Space,
env_observation_space: Space,
task_observation_space: Space,
) -> None:
"""Main class for multitask RL Environments.
This abstract class extends the OpenAI Gym environment and adds
support for return the task-specific information from the environment.
The observation returned from the single task environments is
encoded as `env_obs` (environment observation) while the task
specific observation is encoded as the `task_obs` (task observation).
The observation returned by `mtenv` is a dictionary of `env_obs` and
`task_obs`. Since this class extends the OpenAI gym, the `mtenv`
API looks similar to the gym API.
.. code-block:: python
import mtenv
env = mtenv.make('xxx')
env.reset()
Any multitask RL environment class should extend/implement this class.
Args:
action_space (Space)
env_observation_space (Space)
task_observation_space (Space)
"""
self.action_space = action_space
self.observation_space: DictSpace = DictSpace(
spaces={
"env_obs": env_observation_space,
"task_obs": task_observation_space,
}
)
self.np_random_env: Optional[RandomState] = None
self.np_random_task: Optional[RandomState] = None
self._task_obs: TaskObsType
@abstractmethod
def step(self, action: ActionType) -> StepReturnType:
"""Execute the action in the environment.
Args:
action (ActionType)
Returns:
StepReturnType: Tuple of `multitask observation`, `reward`,
`done`, and `info`. For more information on `multitask observation`
returned by the environment, refer :ref:`multitask_observation`.
"""
pass
def get_task_obs(self) -> TaskObsType:
"""Get the current value of task observation.
Environment returns task observation everytime we call `step` or
`reset`. This function is useful when the user wants to access the
task observation without acting in (or resetting) the environment.
Returns:
TaskObsType:
"""
return self._task_obs
@abstractmethod
def get_task_state(self) -> TaskStateType:
"""Return all the information needed to execute the current task
again.
This function is useful when we want to set the environment to a
previous task.
Returns:
TaskStateType: For more information on `task_state`, refer :ref:`task_state`.
"""
pass
@abstractmethod
def set_task_state(self, task_state: TaskStateType) -> None:
"""Reset the environment to a particular task.
`task_state` contains all the information that the environment
needs to switch to any other task.
Args:
task_state (TaskStateType): For more information on `task_state`,
refer :ref:`task_state`.
"""
pass
def assert_env_seed_is_set(self) -> None:
"""Check that seed (for the environment) is set.
`reset` function should invoke this function before resetting the
environment (for reproducibility).
"""
assert self.np_random_env is not None, "please call `seed()` first"
def assert_task_seed_is_set(self) -> None:
"""Check that seed (for the task) is set.
`sample_task_state` function should invoke this function before
sampling a new task state (for reproducibility).
"""
assert self.np_random_task is not None, "please call `seed_task()` first"
@abstractmethod
def reset(self) -> ObsType:
"""Reset the environment to some initial state and return the
observation in the new state.
The subclasses, extending this class, should ensure that the
environment seed is set (by calling `seed(int)`) before invoking this
method (for reproducibility). It can be done by invoking
`self.assert_env_seed_is_set()`.
Returns:
ObsType: For more information on `multitask observation`
returned by the environment, refer :ref:`multitask_observation`.
"""
pass
@abstractmethod
def sample_task_state(self) -> TaskStateType:
"""Sample a `task_state`.
`task_state` contains all the information that the environment
needs to switch to any other task.
The subclasses, extending this class, should ensure that the task
seed is set (by calling `seed_task(int)`) before invoking this
method (for reproducibility). It can be done by invoking
`self.assert_task_seed_is_set()`.
Returns:
TaskStateType: For more information on `task_state`,
refer :ref:`task_state`.
"""
pass
def reset_task_state(self) -> None:
"""Sample a new task_state and set the environment to that `task_state`.
For more information on `task_state`, refer :ref:`task_state`.
"""
self.set_task_state(task_state=self.sample_task_state())
def seed(self, seed: Optional[int] = None) -> List[int]:
"""Set the seed for the environment's random number generator.
Invoke `seed_task` to set the seed for the task's
random number generator.
Args:
seed (Optional[int], optional): Defaults to None.
Returns:
List[int]: Returns the list of seeds used in the environment's
random number generator. The first value in the list should be
the seed that should be passed to this method for reproducibility.
"""
self.np_random_env, seed = seeding.np_random(seed)
assert isinstance(seed, int)
return [seed]
def seed_task(self, seed: Optional[int] = None) -> List[int]:
"""Set the seed for the task's random number generator.
Invoke `seed` to set the seed for the environment's
random number generator.
Args:
seed (Optional[int], optional): Defaults to None.
Returns:
List[int]: Returns the list of seeds used in the task's
random number generator. The first value in the list should be
the seed that should be passed to this method for reproducibility.
"""
self.np_random_task, seed = seeding.np_random(seed)
assert isinstance(seed, int)
self.observation_space["task_obs"].seed(seed)
return [seed]
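# A typical interaction with a concrete MTEnv subclass looks like the following sketch
# (`MyMTEnv` is a placeholder for any implementation, e.g. one of the classes in mtenv.envs):
#
#     env = MyMTEnv(...)
#     env.seed(1)             # seed the environment's RNG
#     env.seed_task(2)        # seed the task RNG
#     env.reset_task_state()  # sample and switch to a new task
#     obs = env.reset()       # obs == {"env_obs": ..., "task_obs": ...}
#     obs, reward, done, info = env.step(env.action_space.sample())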
| 7,251 | 33.046948 | 89 | py |
mtenv | mtenv-main/mtenv/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
__version__ = "1.0"
from mtenv.core import MTEnv # noqa: F401
from mtenv.envs.registration import make # noqa: F401
__all__ = ["MTEnv", "make"]
| 219 | 26.5 | 70 | py |
mtenv | mtenv-main/mtenv/envs/registration.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
from typing import Any, Dict, Optional
from gym import error
from gym.core import Env
from gym.envs.registration import EnvRegistry, EnvSpec
class MultitaskEnvSpec(EnvSpec): # type: ignore[misc]
def __init__(
self,
id: str,
entry_point: Optional[str] = None,
reward_threshold: Optional[int] = None,
kwargs: Optional[Dict[str, Any]] = None,
nondeterministic: bool = False,
max_episode_steps: Optional[int] = None,
test_kwargs: Optional[Dict[str, Any]] = None,
):
"""A specification for a particular instance of the environment.
Used to register the parameters for official evaluations.
Args:
id (str): The official environment ID
entry_point (Optional[str]): The Python entrypoint of the
environment class (e.g. module.name:Class)
reward_threshold (Optional[int]): The reward threshold before
the task is considered solved
kwargs (dict): The kwargs to pass to the environment class
nondeterministic (bool): Whether this environment is
non-deterministic even after seeding
max_episode_steps (Optional[int]): The maximum number of steps
that an episode can consist of
test_kwargs (Optional[Dict[str, Any]], optional): Dictionary
to specify parameters for automated testing. Defaults to
None.
"""
super().__init__(
id=id,
entry_point=entry_point,
reward_threshold=reward_threshold,
nondeterministic=nondeterministic,
max_episode_steps=max_episode_steps,
kwargs=kwargs,
)
self.test_kwargs = test_kwargs
def __repr__(self) -> str:
return f"MultitaskEnvSpec({self.id})"
@property
def kwargs(self) -> Dict[str, Any]:
return self._kwargs # type: ignore[no-any-return]
class MultiEnvRegistry(EnvRegistry): # type: ignore[misc]
def __init__(self) -> None:
super().__init__()
def register(self, id: str, **kwargs: Any) -> None:
if id in self.env_specs:
raise error.Error("Cannot re-register id: {}".format(id))
self.env_specs[id] = MultitaskEnvSpec(id, **kwargs)
# Have a global registry
mtenv_registry = MultiEnvRegistry()
def register(id: str, **kwargs: Any) -> None:
return mtenv_registry.register(id, **kwargs)
def make(id: str, **kwargs: Any) -> Env:
env = mtenv_registry.make(id, **kwargs)
assert isinstance(env, Env)
return env
def spec(id: str) -> MultitaskEnvSpec:
spec = mtenv_registry.spec(id)
assert isinstance(spec, MultitaskEnvSpec)
return spec
| 2,823 | 31.45977 | 74 | py |
mtenv | mtenv-main/mtenv/envs/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from copy import deepcopy
from mtenv.envs.registration import register
# Control Task
# ----------------------------------------
register(
id="MT-CartPole-v0",
entry_point="mtenv.envs.control.cartpole:MTCartPole",
test_kwargs={
# "valid_env_kwargs": [],
"invalid_env_kwargs": [],
},
)
register(
id="MT-TabularMDP-v0",
entry_point="mtenv.envs.tabular_mdp.tmdp:UniformTMDP",
kwargs={"n_states": 4, "n_actions": 5},
test_kwargs={
"valid_env_kwargs": [{"n_states": 3, "n_actions": 2}],
"invalid_env_kwargs": [],
},
)
register(
id="MT-Acrobat-v0",
entry_point="mtenv.envs.control.acrobot:MTAcrobot",
test_kwargs={
# "valid_env_kwargs": [],
"invalid_env_kwargs": [],
},
)
register(
id="MT-TwoGoalMaze-v0",
entry_point="mtenv.envs.mpte.two_goal_maze_env:build_two_goal_maze_env",
kwargs={"size_x": 3, "size_y": 3, "task_seed": 169, "n_tasks": 100},
test_kwargs={
# "valid_env_kwargs": [],
"invalid_env_kwargs": [],
},
)
# remove it before making the repo public.
default_kwargs = {
"seed": 1,
"visualize_reward": False,
"from_pixels": True,
"height": 84,
"width": 84,
"frame_skip": 2,
"frame_stack": 3,
"sticky_observation_cfg": {},
"initial_task_state": 1,
}
for domain_name, task_name, prefix in [
("finger", "spin", "size"),
("cheetah", "run", "torso_length"),
("walker", "walk", "friction"),
("walker", "walk", "len"),
]:
file_ids = list(range(1, 11))
kwargs = deepcopy(default_kwargs)
kwargs["domain_name"] = domain_name
kwargs["task_name"] = task_name
kwargs["xml_file_ids"] = [f"{prefix}_{i}" for i in file_ids]
register(
id=f"MT-HiPBMDP-{domain_name.capitalize()}-{task_name.capitalize()}-vary-{prefix.replace('_', '-')}-v0",
entry_point="mtenv.envs.hipbmdp.env:build",
kwargs=kwargs,
test_kwargs={
# "valid_env_kwargs": [],
# "invalid_env_kwargs": [],
},
)
default_kwargs = {
"benchmark": None,
"benchmark_name": "MT10",
"env_id_to_task_map": None,
"should_perform_reward_normalization": True,
"num_copies_per_env": 1,
"initial_task_state": 1,
}
for benchmark_name in [("MT10"), ("MT50")]:
kwargs = deepcopy(default_kwargs)
kwargs["benchmark_name"] = benchmark_name
register(
id=f"MT-MetaWorld-{benchmark_name}-v0",
entry_point="mtenv.envs.metaworld.env:build",
kwargs=kwargs,
test_kwargs={
# "valid_env_kwargs": [],
# "invalid_env_kwargs": [],
},
)
kwargs = {
"benchmark": None,
"benchmark_name": "MT1",
"env_id_to_task_map": None,
"should_perform_reward_normalization": True,
"task_name": "pick-place-v1",
"num_copies_per_env": 1,
"initial_task_state": 0,
}
register(
id=f'MT-MetaWorld-{kwargs["benchmark_name"]}-v0',
entry_point="mtenv.envs.metaworld.env:build",
kwargs=kwargs,
test_kwargs={
# "valid_env_kwargs": [],
# "invalid_env_kwargs": [],
},
)
| 3,190 | 24.528 | 112 | py |
mtenv | mtenv-main/mtenv/envs/control/cartpole.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import math
import numpy as np
from gym import logger, spaces
from mtenv import MTEnv
from mtenv.utils import seeding
"""
Classic cart-pole system, based on the implementation by Rich Sutton et al.
Copied from http://incompleteideas.net/sutton/book/code/pole.c
permalink: https://perma.cc/C9ZM-652R
"""
class MTCartPole(MTEnv):
"""A cartpole environment with varying physical values
(see the self._mu_to_vars function)
"""
metadata = {"render.modes": ["human", "rgb_array"], "video.frames_per_second": 50}
def _mu_to_vars(self, mu):
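        # Map the 5-dimensional task descriptor (each component in [-1, 1]) onto the physical
        # parameters of the cart-pole: gravity, cart mass, pole mass, pole length and force magnitude.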
self.gravity = 9.8 + mu[0] * 5
self.masscart = 1.0 + mu[1] * 0.5
self.masspole = 0.1 + mu[2] * 0.09
self.total_mass = self.masspole + self.masscart
self.length = 0.5 + mu[3] * 0.3
self.polemass_length = self.masspole * self.length
self.force_mag = 10 * mu[4]
if mu[4] == 0:
self.force_mag = 10
def __init__(self):
# Angle limit set to 2 * theta_threshold_radians so failing observation is still within bounds
self.x_threshold = 2.4
self.theta_threshold_radians = 12 * 2 * math.pi / 360
high = np.array(
[
self.x_threshold * 2,
np.finfo(np.float32).max,
self.theta_threshold_radians * 2,
np.finfo(np.float32).max,
]
)
observation_space = spaces.Box(-high, high, dtype=np.float32)
action_space = spaces.Discrete(2)
high = np.array([1.0 for k in range(5)])
task_space = spaces.Box(-high, high, dtype=np.float32)
super().__init__(
action_space=action_space,
env_observation_space=observation_space,
task_observation_space=task_space,
)
self.gravity = 9.8
self.masscart = 1.0
self.masspole = 0.1
self.total_mass = self.masspole + self.masscart
self.length = 0.5 # actually half the pole's length
self.polemass_length = self.masspole * self.length
self.force_mag = 10.0
self.tau = 0.02 # seconds between state updates
self.kinematics_integrator = "euler"
# Angle at which to fail the episode
self.state = None
self.steps_beyond_done = None
self.task_state = None
def step(self, action):
self.t += 1
self._mu_to_vars(self.task_state)
assert self.action_space.contains(action), "%r (%s) invalid" % (
action,
type(action),
)
state = self.state
x, x_dot, theta, theta_dot = state
force = self.force_mag if action == 1 else -self.force_mag
costheta = math.cos(theta)
sintheta = math.sin(theta)
temp = (
force + self.polemass_length * theta_dot * theta_dot * sintheta
) / self.total_mass
thetaacc = (self.gravity * sintheta - costheta * temp) / (
self.length
* (4.0 / 3.0 - self.masspole * costheta * costheta / self.total_mass)
)
xacc = temp - self.polemass_length * thetaacc * costheta / self.total_mass
if self.kinematics_integrator == "euler":
x = x + self.tau * x_dot
x_dot = x_dot + self.tau * xacc
theta = theta + self.tau * theta_dot
theta_dot = theta_dot + self.tau * thetaacc
else: # semi-implicit euler
x_dot = x_dot + self.tau * xacc
x = x + self.tau * x_dot
theta_dot = theta_dot + self.tau * thetaacc
theta = theta + self.tau * theta_dot
self.state = [x, x_dot, theta, theta_dot]
done = (
x < -self.x_threshold
or x > self.x_threshold
or theta < -self.theta_threshold_radians
or theta > self.theta_threshold_radians
)
done = bool(done)
reward = 0
if not done:
reward = 1.0
elif self.steps_beyond_done is None:
# Pole just fell!
self.steps_beyond_done = 0
reward = 1.0
else:
if self.steps_beyond_done == 0:
logger.warn(
"You are calling 'step()' even though this environment has already returned done = True. You should always call 'reset()' once you receive 'done = True' -- any further steps are undefined behavior."
)
print(
"You are calling 'step()' even though this environment has already returned done = True. You should always call 'reset()' once you receive 'done = True' -- any further steps are undefined behavior."
)
self.steps_beyond_done += 1
reward = 0.0
return (
{"env_obs": self.state, "task_obs": self.get_task_obs()},
reward,
done,
{},
)
def reset(self, **args):
self.assert_env_seed_is_set()
assert self.task_state is not None
self._mu_to_vars(self.task_state)
self.state = self.np_random_env.uniform(low=-0.05, high=0.05, size=(4,))
self.steps_beyond_done = None
self.t = 0
return {"env_obs": self.state, "task_obs": self.get_task_obs()}
def get_task_obs(self):
return self.task_state
def get_task_state(self):
return self.task_state
def set_task_state(self, task_state):
self.task_state = task_state
def sample_task_state(self):
self.assert_task_seed_is_set()
super().sample_task_state()
new_task_state = [
self.np_random_task.uniform(-1, 1),
self.np_random_task.uniform(-1, 1),
self.np_random_task.uniform(-1, 1),
self.np_random_task.uniform(-1, 1),
self.np_random_task.uniform(-1, 1),
]
return new_task_state
def seed(self, env_seed):
self.np_random_env, seed = seeding.np_random(env_seed)
return [seed]
def seed_task(self, task_seed):
self.np_random_task, seed = seeding.np_random(task_seed)
return [seed]
class CartPole(MTCartPole):
"""The original cartpole environment in the MTEnv fashion"""
def __init__(self):
super().__init__()
def sample_task_state(self):
new_task_state = [0.0, 0.0, 0.0, 0.0, 0.0]
return new_task_state
if __name__ == "__main__":
env = MTCartPole()
env.seed(5)
env.seed_task(15)
env.reset_task_state()
obs = env.reset()
print(obs)
done = False
while not done:
obs, rew, done, _ = env.step(np.random.randint(env.action_space.n))
print(obs)
| 6,710 | 32.059113 | 218 | py |
mtenv | mtenv-main/mtenv/envs/control/setup.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from pathlib import Path
import setuptools
from mtenv.utils.setup_utils import parse_dependency
env_name = "control"
path = Path(__file__).parent / "requirements.txt"
requirements = parse_dependency(path)
with (Path(__file__).parent / "README.md").open() as fh:
long_description = fh.read()
setuptools.setup(
name=env_name,
version="0.0.1",
install_requires=requirements,
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.6",
)
| 812 | 27.034483 | 70 | py |
mtenv | mtenv-main/mtenv/envs/control/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from mtenv.envs.control.cartpole import CartPole, MTCartPole # noqa: F401
| 146 | 48 | 74 | py |
mtenv | mtenv-main/mtenv/envs/control/acrobot.py | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.
import numpy as np
from gym import spaces
from numpy import cos, pi, sin
from mtenv import MTEnv
from mtenv.utils import seeding
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
__credits__ = [
"Alborz Geramifard",
"Robert H. Klein",
"Christoph Dann",
"William Dabney",
"Jonathan P. How",
]
__license__ = "BSD 3-Clause"
__author__ = "Christoph Dann <[email protected]>"
# SOURCE:
# https://github.com/rlpy/rlpy/blob/master/rlpy/Domains/Acrobot.py
class MTAcrobot(MTEnv):
"""A acrobot environment with varying characteristics
The task descriptor is composed of values between -1 and +1 and mapped to acrobot physical characcteristics in the
self._mu_to_vars function.
"""
metadata = {"render.modes": ["human", "rgb_array"], "video.frames_per_second": 15}
dt = 0.2
def _mu_to_vars(self, mu):
self.LINK_LENGTH_1 = 1.0 + mu[0] * 0.5
self.LINK_LENGTH_2 = 1.0 + mu[1] * 0.5
self.LINK_MASS_1 = 1.0 + mu[2] * 0.5
self.LINK_MASS_2 = 1.0 + mu[3] * 0.5
self.LINK_COM_POS_1 = 0.5
self.LINK_COM_POS_2 = 0.5
if mu[6] > 0:
self.AVAIL_TORQUE = [-1.0, 0.0, 1.0]
else:
self.AVAIL_TORQUE = [1.0, 0.0, -1.0]
self.LINK_MOI = 1.0
torque_noise_max = 0.0
MAX_VEL_1 = 4 * pi + pi
MAX_VEL_2 = 9 * pi + 2 * pi
#: use dynamics equations from the nips paper or the book
book_or_nips = "book"
action_arrow = None
domain_fig = None
actions_num = 3
def __init__(self):
self.viewer = None
self.action_space = spaces.Discrete(3)
self.state = None
high = np.array(
[1.5, 1.5, 1.5, 1.5, self.MAX_VEL_1, self.MAX_VEL_2], dtype=np.float32
)
low = -high
observation_space = spaces.Box(low=low, high=high, dtype=np.float32)
action_space = spaces.Discrete(3)
        high = np.array([1.0 for k in range(7)])  # one bound per task parameter returned by sample_task_state
task_space = spaces.Box(-high, high, dtype=np.float32)
super().__init__(
action_space=action_space,
env_observation_space=observation_space,
task_observation_space=task_space,
)
def step(self, a):
self.t += 1
self._mu_to_vars(self.task_state)
s = self.state
torque = self.AVAIL_TORQUE[a]
# Add noise to the force action
if self.torque_noise_max > 0:
torque += self.np_random_env.uniform(
-self.torque_noise_max, self.torque_noise_max
)
# Now, augment the state with our force action so it can be passed to
# _dsdt
s_augmented = np.append(s, torque)
ns = rk4(self._dsdt, s_augmented, [0, self.dt])
# only care about final timestep of integration returned by integrator
ns = ns[-1]
ns = ns[:4] # omit action
# ODEINT IS TOO SLOW!
# ns_continuous = integrate.odeint(self._dsdt, self.s_continuous, [0, self.dt])
# self.s_continuous = ns_continuous[-1] # We only care about the state
# at the ''final timestep'', self.dt
ns[0] = wrap(ns[0], -pi, pi)
ns[1] = wrap(ns[1], -pi, pi)
ns[2] = bound(ns[2], -self.MAX_VEL_1, self.MAX_VEL_1)
ns[3] = bound(ns[3], -self.MAX_VEL_2, self.MAX_VEL_2)
self.state = ns
terminal = self._terminal()
reward = -1.0 if not terminal else 0.0
return (
{"env_obs": self._get_obs(), "task_obs": self.get_task_obs()},
reward,
terminal,
{},
)
def reset(self):
self._mu_to_vars(self.task_state)
self.state = self.np_random_env.uniform(low=-0.1, high=0.1, size=(4,))
self.t = 0
return {"env_obs": self._get_obs(), "task_obs": self.get_task_obs()}
def get_task_obs(self):
return self.task_state
def get_task_state(self):
return self.task_state
def set_task_state(self, task_state):
self.task_state = task_state
def _get_obs(self):
s = self.state
return [cos(s[0]), sin(s[0]), cos(s[1]), sin(s[1]), s[2], s[3]]
def _terminal(self):
s = self.state
return bool(-cos(s[0]) - cos(s[1] + s[0]) > 1.0)
def _dsdt(self, s_augmented, t):
m1 = self.LINK_MASS_1
m2 = self.LINK_MASS_2
l1 = self.LINK_LENGTH_1
lc1 = self.LINK_COM_POS_1
lc2 = self.LINK_COM_POS_2
I1 = self.LINK_MOI
I2 = self.LINK_MOI
g = 9.8
a = s_augmented[-1]
s = s_augmented[:-1]
theta1 = s[0]
theta2 = s[1]
dtheta1 = s[2]
dtheta2 = s[3]
d1 = (
m1 * lc1 ** 2
+ m2 * (l1 ** 2 + lc2 ** 2 + 2 * l1 * lc2 * cos(theta2))
+ I1
+ I2
)
d2 = m2 * (lc2 ** 2 + l1 * lc2 * cos(theta2)) + I2
phi2 = m2 * lc2 * g * cos(theta1 + theta2 - pi / 2.0)
phi1 = (
-m2 * l1 * lc2 * dtheta2 ** 2 * sin(theta2)
- 2 * m2 * l1 * lc2 * dtheta2 * dtheta1 * sin(theta2)
+ (m1 * lc1 + m2 * l1) * g * cos(theta1 - pi / 2)
+ phi2
)
if self.book_or_nips == "nips":
# the following line is consistent with the description in the
# paper
ddtheta2 = (a + d2 / d1 * phi1 - phi2) / (m2 * lc2 ** 2 + I2 - d2 ** 2 / d1)
else:
# the following line is consistent with the java implementation and the
# book
ddtheta2 = (
a + d2 / d1 * phi1 - m2 * l1 * lc2 * dtheta1 ** 2 * sin(theta2) - phi2
) / (m2 * lc2 ** 2 + I2 - d2 ** 2 / d1)
ddtheta1 = -(d2 * ddtheta2 + phi1) / d1
return (dtheta1, dtheta2, ddtheta1, ddtheta2, 0.0)
def seed(self, env_seed):
self.np_random_env, seed = seeding.np_random(env_seed)
return [seed]
def seed_task(self, task_seed):
self.np_random_task, seed = seeding.np_random(task_seed)
return [seed]
def sample_task_state(self):
self.assert_task_seed_is_set()
super().sample_task_state()
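        # mu[0:2] scale the link lengths, mu[2:4] the link masses and mu[6] the torque ordering; mu[4] and mu[5] are unused by _mu_to_vars.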
new_task_state = [
self.np_random_task.uniform(-1, 1),
self.np_random_task.uniform(-1, 1),
self.np_random_task.uniform(-1, 1),
self.np_random_task.uniform(-1, 1),
self.np_random_task.uniform(-1, 1),
self.np_random_task.uniform(-1, 1),
self.np_random_task.uniform(-1, 1),
]
return new_task_state
def wrap(x, m, M):
"""
:param x: a scalar
:param m: minimum possible value in range
:param M: maximum possible value in range
Wraps ``x`` so m <= x <= M; but unlike ``bound()`` which
truncates, ``wrap()`` wraps x around the coordinate system defined by m,M.\n
For example, m = -180, M = 180 (degrees), x = 360 --> returns 0.
"""
diff = M - m
while x > M:
x = x - diff
while x < m:
x = x + diff
return x
def bound(x, m, M=None):
"""
:param x: scalar
Either have m as scalar, so bound(x,m,M) which returns m <= x <= M *OR*
have m as length 2 vector, bound(x,m, <IGNORED>) returns m[0] <= x <= m[1].
"""
if M is None:
M = m[1]
m = m[0]
# bound x between min (m) and Max (M)
return min(max(x, m), M)
def rk4(derivs, y0, t, *args, **kwargs):
"""
Integrate 1D or ND system of ODEs using 4-th order Runge-Kutta.
This is a toy implementation which may be useful if you find
yourself stranded on a system w/o scipy. Otherwise use
:func:`scipy.integrate`.
*y0*
initial state vector
*t*
sample times
*derivs*
returns the derivative of the system and has the
signature ``dy = derivs(yi, ti)``
*args*
additional arguments passed to the derivative function
*kwargs*
additional keyword arguments passed to the derivative function
Example 1 ::
## 2D system
def derivs6(x,t):
d1 = x[0] + 2*x[1]
d2 = -3*x[0] + 4*x[1]
return (d1, d2)
dt = 0.0005
t = arange(0.0, 2.0, dt)
y0 = (1,2)
yout = rk4(derivs6, y0, t)
Example 2::
## 1D system
alpha = 2
def derivs(x,t):
return -alpha*x + exp(-t)
y0 = 1
yout = rk4(derivs, y0, t)
If you have access to scipy, you should probably be using the
scipy.integrate tools rather than this function.
"""
try:
Ny = len(y0)
except TypeError:
yout = np.zeros((len(t),), np.float_)
else:
yout = np.zeros((len(t), Ny), np.float_)
yout[0] = y0
for i in np.arange(len(t) - 1):
thist = t[i]
dt = t[i + 1] - thist
dt2 = dt / 2.0
y0 = yout[i]
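        # Classical fourth-order Runge-Kutta step: four slope evaluations combined with weights (k1 + 2*k2 + 2*k3 + k4) / 6.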
k1 = np.asarray(derivs(y0, thist, *args, **kwargs))
k2 = np.asarray(derivs(y0 + dt2 * k1, thist + dt2, *args, **kwargs))
k3 = np.asarray(derivs(y0 + dt2 * k2, thist + dt2, *args, **kwargs))
k4 = np.asarray(derivs(y0 + dt * k3, thist + dt, *args, **kwargs))
yout[i + 1] = y0 + dt / 6.0 * (k1 + 2 * k2 + 2 * k3 + k4)
return yout
class Acrobot(MTAcrobot):
"""The original acrobot environment in the MTEnv fashion"""
def __init__(self):
super().__init__()
def sample_task_state(self):
self.assert_task_seed_is_set()
super().sample_task_state()
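        # All-zero task parameters recover the default acrobot dynamics (unit link lengths and masses).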
new_task_state = [
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
return new_task_state
if __name__ == "__main__":
env = MTAcrobot()
env.seed(5)
env.seed_task(15)
env.reset_task_state()
obs = env.reset()
print(obs)
done = False
while not done:
obs, rew, done, _ = env.step(np.random.randint(env.action_space.n))
print(obs)
| 10,088 | 29.480363 | 122 | py |
mtenv | mtenv-main/mtenv/envs/metaworld/setup.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from pathlib import Path
import setuptools
from mtenv.utils.setup_utils import parse_dependency
env_name = "metaworld"
path = Path(__file__).parent / "requirements.txt"
requirements = parse_dependency(path)
with (Path(__file__).parent / "README.md").open() as fh:
long_description = fh.read()
setuptools.setup(
name=env_name,
version="0.0.1",
install_requires=requirements,
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.6",
)
| 766 | 25.448276 | 70 | py |
mtenv | mtenv-main/mtenv/envs/metaworld/__init__.py | 0 | 0 | 0 | py |
|
mtenv | mtenv-main/mtenv/envs/metaworld/env.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import random
from typing import Any, Callable, Dict, List, Optional, Tuple
import metaworld
from gym import Env
from mtenv import MTEnv
from mtenv.envs.metaworld.wrappers.normalized_env import ( # type: ignore[attr-defined]
NormalizedEnvWrapper,
)
from mtenv.envs.shared.wrappers.multienv import MultiEnvWrapper
EnvBuilderType = Callable[[], Env]
TaskStateType = int
TaskObsType = int
EnvIdToTaskMapType = Dict[str, metaworld.Task]
class MetaWorldMTWrapper(MultiEnvWrapper):
def __init__(
self,
funcs_to_make_envs: List[EnvBuilderType],
initial_task_state: TaskStateType,
env_id_to_task_map: EnvIdToTaskMapType,
) -> None:
"""Wrapper to make MetaWorld environment compatible with Multitask
Environment API. See :cite:`yu2020meta` for more details about
MetaWorld.
Args:
funcs_to_make_envs (List[EnvBuilderType]): list of constructor
functions to make the environments.
initial_task_state (TaskStateType): initial task/environment
to select.
env_id_to_task_map (EnvIdToTaskMapType): In MetaWorld, each
environment can be associated with multiple tasks. This
dict persists the mapping between environment ids and tasks.
"""
super().__init__(
funcs_to_make_envs=funcs_to_make_envs,
initial_task_state=initial_task_state,
)
self.env_id_to_task_map = env_id_to_task_map
def get_list_of_func_to_make_envs(
benchmark: Optional[metaworld.Benchmark],
benchmark_name: str,
env_id_to_task_map: Optional[EnvIdToTaskMapType],
should_perform_reward_normalization: bool = True,
task_name: str = "pick-place-v1",
num_copies_per_env: int = 1,
) -> Tuple[List[Any], Dict[str, Any]]:
"""Return a list of functions to construct the MetaWorld environments
and a mapping of environment ids to tasks.
Args:
benchmark (Optional[metaworld.Benchmark]): `benchmark` to create
tasks from.
benchmark_name (str): name of the `benchmark`. This is used only
when the `benchmark` is None.
env_id_to_task_map (Optional[EnvIdToTaskMapType]): In MetaWorld,
each environment can be associated with multiple tasks. This
dict persists the mapping between environment ids and tasks.
should_perform_reward_normalization (bool, optional): Defaults to
True.
        task_name (str, optional): name of the task; only used when the
            benchmark is MT1. Defaults to "pick-place-v1".
num_copies_per_env (int, optional): Number of copies to create for
each environment. Defaults to 1.
Raises:
ValueError: if `benchmark` is None and `benchmark_name` is not
MT1, MT10, or MT50.
Returns:
Tuple[List[Any], Dict[str, Any]]: A tuple of two elements. The
first element is a list of functions to construct the MetaWorld
environments and the second is a mapping of environment ids
to tasks.
"""
if not benchmark:
if benchmark_name == "MT1":
benchmark = metaworld.ML1(task_name)
elif benchmark_name == "MT10":
benchmark = metaworld.MT10()
elif benchmark_name == "MT50":
benchmark = metaworld.MT50()
else:
raise ValueError(f"benchmark_name={benchmark_name} is not valid.")
env_id_list = list(benchmark.train_classes.keys())
def _get_class_items(current_benchmark):
return current_benchmark.train_classes.items()
def _get_tasks(current_benchmark):
return current_benchmark.train_tasks
def _get_env_id_to_task_map() -> EnvIdToTaskMapType:
env_id_to_task_map: EnvIdToTaskMapType = {}
current_benchmark = benchmark
for env_id in env_id_list:
for name, _ in _get_class_items(current_benchmark):
if name == env_id:
task = random.choice(
[
task
for task in _get_tasks(current_benchmark)
if task.env_name == name
]
)
env_id_to_task_map[env_id] = task
return env_id_to_task_map
if env_id_to_task_map is None:
env_id_to_task_map: EnvIdToTaskMapType = _get_env_id_to_task_map() # type: ignore[no-redef]
assert env_id_to_task_map is not None
def get_func_to_make_envs(env_id: str):
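        # Return a closure that lazily builds the environment for `env_id` and pins it to its sampled task.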
current_benchmark = benchmark
def _make_env():
for name, env_cls in _get_class_items(current_benchmark):
if name == env_id:
env = env_cls()
task = env_id_to_task_map[env_id]
env.set_task(task)
if should_perform_reward_normalization:
env = NormalizedEnvWrapper(env, normalize_reward=True)
return env
return _make_env
if num_copies_per_env > 1:
env_id_list = [
[env_id for _ in range(num_copies_per_env)] for env_id in env_id_list
]
env_id_list = [
env_id for env_id_sublist in env_id_list for env_id in env_id_sublist
]
funcs_to_make_envs = [get_func_to_make_envs(env_id) for env_id in env_id_list]
return funcs_to_make_envs, env_id_to_task_map
def build(
benchmark: Optional[metaworld.Benchmark],
benchmark_name: str,
env_id_to_task_map: Optional[EnvIdToTaskMapType],
should_perform_reward_normalization: bool = True,
task_name: str = "pick-place-v1",
num_copies_per_env: int = 1,
initial_task_state: int = 1,
) -> MTEnv:
"""Build a MTEnv comptaible variant of MetaWorld.
Args:
benchmark (Optional[metaworld.Benchmark]): `benchmark` to create
tasks from.
benchmark_name (str): name of the `benchmark`. This is used only
when the `benchmark` is None.
env_id_to_task_map (Optional[EnvIdToTaskMapType]): In MetaWorld,
each environment can be associated with multiple tasks. This
dict persists the mapping between environment ids and tasks.
should_perform_reward_normalization (bool, optional): Defaults to
True.
        task_name (str, optional): name of the task; only used when the
            benchmark is MT1. Defaults to "pick-place-v1".
num_copies_per_env (int, optional): Number of copies to create for
each environment. Defaults to 1.
initial_task_state (int, optional): initial task/environment to
select. Defaults to 1.
Returns:
MTEnv:
"""
funcs_to_make_envs, env_id_to_task_map = get_list_of_func_to_make_envs(
benchmark=benchmark,
benchmark_name=benchmark_name,
env_id_to_task_map=env_id_to_task_map,
should_perform_reward_normalization=should_perform_reward_normalization,
task_name=task_name,
num_copies_per_env=num_copies_per_env,
)
assert env_id_to_task_map is not None
mtenv = MetaWorldMTWrapper(
funcs_to_make_envs=funcs_to_make_envs,
initial_task_state=initial_task_state,
env_id_to_task_map=env_id_to_task_map,
)
return mtenv
| 7,359 | 36.171717 | 100 | py |
mtenv | mtenv-main/mtenv/envs/metaworld/wrappers/normalized_env.py | # This code is taken from: https://raw.githubusercontent.com/rlworkgroup/garage/af57bf9c6b10cd733cb0fa9bfe3abd0ba239fd6e/src/garage/envs/normalized_env.py
#
# """"An environment wrapper that normalizes action, observation and reward."""
# type: ignore
import gym
import gym.spaces
import gym.spaces.utils
import numpy as np
class NormalizedEnvWrapper(gym.Wrapper):
"""An environment wrapper for normalization.
This wrapper normalizes action, and optionally observation and reward.
Args:
env (garage.envs.GarageEnv): An environment instance.
scale_reward (float): Scale of environment reward.
normalize_obs (bool): If True, normalize observation.
normalize_reward (bool): If True, normalize reward. scale_reward is
applied after normalization.
expected_action_scale (float): Assuming action falls in the range of
[-expected_action_scale, expected_action_scale] when normalize it.
flatten_obs (bool): Flatten observation if True.
obs_alpha (float): Update rate of moving average when estimating the
mean and variance of observations.
reward_alpha (float): Update rate of moving average when estimating the
mean and variance of rewards.
"""
def __init__(
self,
env,
scale_reward=1.0,
normalize_obs=False,
normalize_reward=False,
expected_action_scale=1.0,
flatten_obs=True,
obs_alpha=0.001,
reward_alpha=0.001,
):
super().__init__(env)
self._scale_reward = scale_reward
self._normalize_obs = normalize_obs
self._normalize_reward = normalize_reward
self._expected_action_scale = expected_action_scale
self._flatten_obs = flatten_obs
self._obs_alpha = obs_alpha
flat_obs_dim = gym.spaces.utils.flatdim(env.observation_space)
self._obs_mean = np.zeros(flat_obs_dim)
self._obs_var = np.ones(flat_obs_dim)
self._reward_alpha = reward_alpha
self._reward_mean = 0.0
self._reward_var = 1.0
def _update_obs_estimate(self, obs):
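        # Exponential moving average of the flattened observation's mean and variance.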
flat_obs = gym.spaces.utils.flatten(self.env.observation_space, obs)
self._obs_mean = (
1 - self._obs_alpha
) * self._obs_mean + self._obs_alpha * flat_obs
self._obs_var = (
1 - self._obs_alpha
) * self._obs_var + self._obs_alpha * np.square(flat_obs - self._obs_mean)
def _update_reward_estimate(self, reward):
self._reward_mean = (
1 - self._reward_alpha
) * self._reward_mean + self._reward_alpha * reward
self._reward_var = (
1 - self._reward_alpha
) * self._reward_var + self._reward_alpha * np.square(
reward - self._reward_mean
)
def _apply_normalize_obs(self, obs):
"""Compute normalized observation.
Args:
obs (np.ndarray): Observation.
Returns:
np.ndarray: Normalized observation.
"""
self._update_obs_estimate(obs)
flat_obs = gym.spaces.utils.flatten(self.env.observation_space, obs)
normalized_obs = (flat_obs - self._obs_mean) / (np.sqrt(self._obs_var) + 1e-8)
if not self._flatten_obs:
normalized_obs = gym.spaces.utils.unflatten(
self.env.observation_space, normalized_obs
)
return normalized_obs
def _apply_normalize_reward(self, reward):
"""Compute normalized reward.
Args:
reward (float): Reward.
Returns:
float: Normalized reward.
"""
self._update_reward_estimate(reward)
return reward / (np.sqrt(self._reward_var) + 1e-8)
def reset(self, **kwargs):
"""Reset environment.
Args:
**kwargs: Additional parameters for reset.
        Returns:
            np.ndarray: The initial observation of the environment,
                normalized if `normalize_obs` is True.
"""
ret = self.env.reset(**kwargs)
if self._normalize_obs:
return self._apply_normalize_obs(ret)
else:
return ret
def step(self, action):
"""Feed environment with one step of action and get result.
Args:
action (np.ndarray): An action fed to the environment.
Returns:
tuple:
* observation (np.ndarray): The observation of the environment.
* reward (float): The reward acquired at this time step.
* done (boolean): Whether the environment was completed at this
time step.
* infos (dict): Environment-dependent additional information.
"""
if isinstance(self.action_space, gym.spaces.Box):
# rescale the action when the bounds are not inf
lb, ub = self.action_space.low, self.action_space.high
            if np.all(lb != -np.inf) and np.all(ub != np.inf):
scaled_action = lb + (action + self._expected_action_scale) * (
0.5 * (ub - lb) / self._expected_action_scale
)
scaled_action = np.clip(scaled_action, lb, ub)
else:
scaled_action = action
else:
scaled_action = action
        try:
            next_obs, reward, done, info = self.env.step(scaled_action)
        except Exception as e:
            # Log and re-raise; swallowing the exception would leave the step
            # return values undefined below.
            print(e)
            raise
if self._normalize_obs:
next_obs = self._apply_normalize_obs(next_obs)
if self._normalize_reward:
reward = self._apply_normalize_reward(reward)
return next_obs, reward * self._scale_reward, done, info
| 5,977 | 34.164706 | 154 | py |
mtenv | mtenv-main/mtenv/envs/metaworld/wrappers/__init__.py | 0 | 0 | 0 | py |
|
mtenv | mtenv-main/mtenv/envs/shared/__init__.py | 0 | 0 | 0 | py |
|
mtenv | mtenv-main/mtenv/envs/shared/wrappers/__init__.py | 0 | 0 | 0 | py |
|
mtenv | mtenv-main/mtenv/envs/shared/wrappers/multienv.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""Wrapper to (lazily) construct a multitask environment from a list of
constructors (list of functions to construct the environments)."""
from typing import Callable, List, Optional
from gym.core import Env
from gym.spaces.discrete import Discrete as DiscreteSpace
from mtenv import MTEnv
from mtenv.utils import seeding
from mtenv.utils.types import ActionType, EnvObsType, ObsType, StepReturnType
EnvBuilderType = Callable[[], Env]
TaskStateType = int
TaskObsType = int
class MultiEnvWrapper(MTEnv):
def __init__(
self,
funcs_to_make_envs: List[EnvBuilderType],
initial_task_state: TaskStateType,
) -> None:
"""Wrapper to (lazily) construct a multitask environment from a
list of constructors (list of functions to construct the
environments).
        The wrapper enables activating/selecting any environment (from the
list of environments that can be created) and that environment is
treated as the current task. The environments are created lazily.
Note that this wrapper is experimental and may change in the future.
Args:
funcs_to_make_envs (List[EnvBuilderType]): list of constructor
functions to make the environments.
            initial_task_state (TaskStateType): initial task/environment
to select.
"""
self._num_tasks = len(funcs_to_make_envs)
self._funcs_to_make_envs = funcs_to_make_envs
self._envs = [None for _ in range(self._num_tasks)]
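        # Environments are created lazily; only the environment for the initial task is built eagerly.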
self._envs[initial_task_state] = funcs_to_make_envs[initial_task_state]()
self.env: Env = self._envs[initial_task_state]
super().__init__(
action_space=self.env.action_space,
env_observation_space=self.env.observation_space,
task_observation_space=DiscreteSpace(n=self._num_tasks),
)
self.task_obs: TaskObsType = initial_task_state
def _make_observation(self, env_obs: EnvObsType) -> ObsType:
return {
"env_obs": env_obs,
"task_obs": self.task_obs,
}
def step(self, action: ActionType) -> StepReturnType:
env_obs, reward, done, info = self.env.step(action)
return self._make_observation(env_obs=env_obs), reward, done, info
def get_task_state(self) -> TaskStateType:
return self.task_obs
def set_task_state(self, task_state: TaskStateType) -> None:
self.task_obs = task_state
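        # Switching to a task builds its environment on first use.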
if self._envs[task_state] is None:
self._envs[task_state] = self._funcs_to_make_envs[task_state]()
self.env = self._envs[task_state]
def assert_env_seed_is_set(self) -> None:
"""The seed is set during the call to the constructor of self.env"""
pass
def assert_task_seed_is_set(self) -> None:
assert self.np_random_task is not None, "please call `seed_task()` first"
def reset(self) -> ObsType:
return self._make_observation(env_obs=self.env.reset())
def sample_task_state(self) -> TaskStateType:
self.assert_task_seed_is_set()
task_state = self.np_random_task.randint(self._num_tasks) # type: ignore[union-attr]
# The assert statement (at the start of the function) ensures that self.np_random_task
# is not None. Mypy is raising the warning incorrectly.
assert isinstance(task_state, int)
return task_state
def reset_task_state(self) -> None:
self.set_task_state(task_state=self.sample_task_state())
def seed(self, seed: Optional[int] = None) -> List[int]:
self.np_random_env, seed = seeding.np_random(seed)
env_seeds = self.env.seed(seed)
if isinstance(env_seeds, list):
return [seed] + env_seeds
return [seed]
| 3,850 | 37.89899 | 94 | py |
mtenv | mtenv-main/mtenv/envs/hipbmdp/dmc_env.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Any, Dict
import gym
from gym.core import Env
from gym.envs.registration import register
from mtenv.envs.hipbmdp.wrappers import framestack, sticky_observation
def _build_env(
domain_name: str,
task_name: str,
seed: int = 1,
xml_file_id: str = "none",
visualize_reward: bool = True,
from_pixels: bool = False,
height: int = 84,
width: int = 84,
camera_id: int = 0,
frame_skip: int = 1,
environment_kwargs: Any = None,
episode_length: int = 1000,
) -> Env:
if xml_file_id is None:
env_id = "dmc_%s_%s_%s-v1" % (domain_name, task_name, seed)
else:
env_id = "dmc_%s_%s_%s_%s-v1" % (domain_name, task_name, xml_file_id, seed)
if from_pixels:
assert (
not visualize_reward
), "cannot use visualize reward when learning from pixels"
# shorten episode length
max_episode_steps = (episode_length + frame_skip - 1) // frame_skip
if env_id not in gym.envs.registry.env_specs:
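        # Register this environment id with gym only the first time it is requested.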
register(
id=env_id,
entry_point="mtenv.envs.hipbmdp.wrappers.dmc_wrapper:DMCWrapper",
kwargs={
"domain_name": domain_name,
"task_name": task_name,
"task_kwargs": {"random": seed, "xml_file_id": xml_file_id},
"environment_kwargs": environment_kwargs,
"visualize_reward": visualize_reward,
"from_pixels": from_pixels,
"height": height,
"width": width,
"camera_id": camera_id,
"frame_skip": frame_skip,
},
max_episode_steps=max_episode_steps,
)
return gym.make(env_id)
def build_dmc_env(
domain_name: str,
task_name: str,
seed: int,
xml_file_id: str,
visualize_reward: bool,
from_pixels: bool,
height: int,
width: int,
frame_skip: int,
frame_stack: int,
sticky_observation_cfg: Dict[str, Any],
) -> Env:
"""Build a single DMC environment as described in
:cite:`tassa2020dmcontrol`.
Args:
domain_name (str): name of the domain.
task_name (str): name of the task.
seed (int): environment seed (for reproducibility).
xml_file_id (str): id of the xml file to use.
visualize_reward (bool): should visualize reward ?
from_pixels (bool): return pixel observations?
height (int): height of pixel frames.
width (int): width of pixel frames.
frame_skip (int): should skip frames?
frame_stack (int): should stack frames together?
sticky_observation_cfg (Dict[str, Any]): Configuration for using
sticky observations. It should be a dictionary with three
keys, `should_use` which specifies if the config should be
used, `sticky_probability` which specifies the probability of
choosing a previous task and `last_k` which specifies the
number of previous frames to choose from.
Returns:
Env:
"""
env = _build_env(
domain_name=domain_name,
task_name=task_name,
seed=seed,
visualize_reward=visualize_reward,
from_pixels=from_pixels,
height=height,
width=width,
frame_skip=frame_skip,
xml_file_id=xml_file_id,
)
if from_pixels:
env = framestack.FrameStack(env, k=frame_stack)
if sticky_observation_cfg and sticky_observation_cfg["should_use"]:
env = sticky_observation.StickyObservation( # type: ignore[attr-defined]
env=env,
sticky_probability=sticky_observation_cfg["sticky_probability"],
last_k=sticky_observation_cfg["last_k"],
)
return env
| 3,821 | 31.948276 | 83 | py |
mtenv | mtenv-main/mtenv/envs/hipbmdp/setup.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from pathlib import Path
import setuptools
from mtenv.utils.setup_utils import parse_dependency
env_name = "hipbmdp"
path = Path(__file__).parent / "requirements.txt"
requirements = parse_dependency(path)
with (Path(__file__).parent / "README.md").open() as fh:
long_description = fh.read()
setuptools.setup(
name=env_name,
version="0.0.1",
install_requires=requirements,
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.6",
)
| 764 | 25.37931 | 70 | py |
mtenv | mtenv-main/mtenv/envs/hipbmdp/__init__.py | 0 | 0 | 0 | py |
|
mtenv | mtenv-main/mtenv/envs/hipbmdp/env.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Any, Callable, Dict, List
from gym.core import Env
from mtenv import MTEnv
from mtenv.envs.hipbmdp import dmc_env
from mtenv.envs.shared.wrappers.multienv import MultiEnvWrapper
EnvBuilderType = Callable[[], Env]
TaskStateType = int
TaskObsType = int
def build(
domain_name: str,
task_name: str,
seed: int,
xml_file_ids: List[str],
visualize_reward: bool,
from_pixels: bool,
height: int,
width: int,
frame_skip: int,
frame_stack: int,
sticky_observation_cfg: Dict[str, Any],
initial_task_state: int = 1,
) -> MTEnv:
"""Build multitask environment as described in HiPBMDP paper. See
:cite:`mtrl_as_a_hidden_block_mdp` for more details.
Args:
domain_name (str): name of the domain.
task_name (str): name of the task.
seed (int): environment seed (for reproducibility).
xml_file_ids (List[str]): ids of xml files.
visualize_reward (bool): should visualize reward ?
from_pixels (bool): return pixel observations?
height (int): height of pixel frames.
width (int): width of pixel frames.
frame_skip (int): should skip frames?
frame_stack (int): should stack frames together?
sticky_observation_cfg (Dict[str, Any]): Configuration for using
sticky observations. It should be a dictionary with three
keys, `should_use` which specifies if the config should be
used, `sticky_probability` which specifies the probability of
choosing a previous task and `last_k` which specifies the
number of previous frames to choose from.
        initial_task_state (int, optional): initial task/environment
to select. Defaults to 1.
Returns:
MTEnv:
"""
def get_func_to_make_envs(xml_file_id: str) -> EnvBuilderType:
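        # Each xml_file_id gets its own constructor; the resulting environments differ only through the xml-specified physics.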
def _func() -> Env:
return dmc_env.build_dmc_env(
domain_name=domain_name,
task_name=task_name,
seed=seed,
xml_file_id=xml_file_id,
visualize_reward=visualize_reward,
from_pixels=from_pixels,
height=height,
width=width,
frame_skip=frame_skip,
frame_stack=frame_stack,
sticky_observation_cfg=sticky_observation_cfg,
)
return _func
funcs_to_make_envs = [
get_func_to_make_envs(xml_file_id=file_id) for file_id in xml_file_ids
]
mtenv = MultiEnvWrapper(
funcs_to_make_envs=funcs_to_make_envs, initial_task_state=initial_task_state
)
return mtenv
| 2,727 | 32.268293 | 84 | py |
mtenv | mtenv-main/mtenv/envs/hipbmdp/wrappers/framestack.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""Wrapper to stack observations for single task environments."""
from collections import deque
import gym
import numpy as np
from mtenv.utils.types import ActionType, EnvStepReturnType
class FrameStack(gym.Wrapper): # type: ignore[misc]
# Mypy error: Class cannot subclass 'Wrapper' (has type 'Any') [misc]
def __init__(self, env: gym.core.Env, k: int):
"""Wrapper to stack observations for single task environments.
Args:
env (gym.core.Env): Single Task Environment
k (int): number of frames to stack.
"""
gym.Wrapper.__init__(self, env)
self._k = k
self._frames: deque = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = gym.spaces.Box(
low=0,
high=1,
shape=((shp[0] * k,) + shp[1:]),
dtype=env.observation_space.dtype,
)
self._max_episode_steps = env._max_episode_steps
def reset(self) -> np.ndarray:
obs = self.env.reset()
for _ in range(self._k):
self._frames.append(obs)
return self._get_obs()
def step(self, action: ActionType) -> EnvStepReturnType:
obs, reward, done, info = self.env.step(action)
self._frames.append(obs)
return self._get_obs(), reward, done, info
def _get_obs(self) -> np.ndarray:
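        # Concatenate the k most recent frames along the first (channel) dimension.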
assert len(self._frames) == self._k
return np.concatenate(list(self._frames), axis=0)
| 1,554 | 31.395833 | 74 | py |
mtenv | mtenv-main/mtenv/envs/hipbmdp/wrappers/dmc_wrapper.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Any, Dict, Optional
import dmc2gym
import numpy as np
from dmc2gym.wrappers import DMCWrapper as BaseDMCWrapper
from gym import spaces
import local_dm_control_suite as local_dmc_suite
class DMCWrapper(BaseDMCWrapper):
def __init__(
self,
domain_name: str,
task_name: str,
task_kwargs: Any = None,
visualize_reward: Optional[Dict[str, Any]] = None,
from_pixels: bool = False,
        height: int = 84,
width: int = 84,
camera_id: int = 0,
frame_skip: int = 1,
environment_kwargs: Any = None,
channels_first: bool = True,
):
"""This wrapper is based on implementation from
https://github.com/denisyarats/dmc2gym/blob/master/dmc2gym/wrappers.py#L37
We extend the wrapper so that we can use the modified version of
`dm_control_suite`.
"""
assert (
"random" in task_kwargs # type: ignore [operator]
), "please specify a seed, for deterministic behaviour"
self._from_pixels = from_pixels
self._height = height
self._width = width
self._camera_id = camera_id
self._frame_skip = frame_skip
self._channels_first = channels_first
if visualize_reward is None:
visualize_reward = {}
# create task
self._env = local_dmc_suite.load(
domain_name=domain_name,
task_name=task_name,
task_kwargs=task_kwargs,
visualize_reward=visualize_reward,
environment_kwargs=environment_kwargs,
)
# true and normalized action spaces
self._true_action_space = dmc2gym.wrappers._spec_to_box(
[self._env.action_spec()]
)
self._norm_action_space = spaces.Box(
low=-1.0, high=1.0, shape=self._true_action_space.shape, dtype=np.float32
)
# create observation space
if from_pixels:
shape = [3, height, width] if channels_first else [height, width, 3]
self._observation_space = spaces.Box(
low=0, high=255, shape=shape, dtype=np.uint8
)
else:
self._observation_space = dmc2gym.wrappers._spec_to_box(
self._env.observation_spec().values()
)
self._state_space = dmc2gym.wrappers._spec_to_box(
self._env.observation_spec().values()
)
self.current_state = None
# set seed
self.seed(seed=task_kwargs["random"]) # type: ignore [index]
| 2,634 | 31.530864 | 85 | py |
mtenv | mtenv-main/mtenv/envs/hipbmdp/wrappers/__init__.py | 0 | 0 | 0 | py |
|
mtenv | mtenv-main/mtenv/envs/hipbmdp/wrappers/sticky_observation.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""Wrapper to enable sitcky observations for single task environments."""
# type: ignore
import random
from collections import deque
import gym
class StickyObservation(gym.Wrapper):
def __init__(self, env: gym.Env, sticky_probability: float, last_k: int):
"""Env wrapper that returns a previous observation with probability
`p` and the current observation with a probability `1-p`. `last_k`
previous observations are stored.
Args:
env (gym.Env): Single task environment.
sticky_probability (float): Probability `p` for returning a
previous observation.
last_k (int): Number of previous observations to store.
Raises:
ValueError: Raise a ValueError if `sticky_probability` is
not in range `[0, 1]`.
"""
        super().__init__(env)
if 1 >= sticky_probability >= 0:
self._sticky_probability = sticky_probability
else:
raise ValueError(
f"sticky_probability = {sticky_probability} is not in the interval [0, 1]."
)
self._last_k = last_k + 1
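        # Keep the `last_k` previous observations plus the current one, hence maxlen = last_k + 1.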
self._observations: deque = deque([], maxlen=self._last_k)
self.observation_space = env.observation_space
self._max_episode_steps = env._max_episode_steps
def reset(self):
obs = self.env.reset()
for _ in range(self._last_k):
self._observations.append(obs)
return self._get_obs()
def step(self, action):
obs, reward, done, info = self.env.step(action)
self._observations.append(obs)
return self._get_obs(), reward, done, info
def _get_obs(self):
assert len(self._observations) == self._last_k
should_choose_old_observation = random.random() < self._sticky_probability
if should_choose_old_observation:
index = random.randint(0, self._last_k - 2)
return self._observations[index]
else:
return self._observations[-1]
| 2,110 | 36.035088 | 91 | py |
mtenv | mtenv-main/mtenv/envs/mpte/setup.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from pathlib import Path
import setuptools
from mtenv.utils.setup_utils import parse_dependency
env_name = "mpte"
path = Path(__file__).parent / "requirements.txt"
requirements = parse_dependency(path)
with (Path(__file__).parent / "README.md").open() as fh:
long_description = fh.read()
setuptools.setup(
name=env_name,
version="1.0.0",
install_requires=requirements,
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: MIT",
"Operating System :: OS Independent",
],
python_requires=">=3.6",
)
| 737 | 25.357143 | 70 | py |
mtenv | mtenv-main/mtenv/envs/mpte/two_goal_maze_env.py | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.
import copy
import math
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
from gym import spaces
from gym.spaces.box import Box as BoxSpace
from gym.spaces.dict import Dict as DictSpace
from gym.spaces.discrete import Discrete as DiscreteSpace
from gym_miniworld.entity import Box
from gym_miniworld.miniworld import Agent, MiniWorldEnv
from numpy.random import RandomState
from mtenv.utils import seeding
from mtenv.utils.types import DoneType, InfoType, RewardType, TaskObsType
from mtenv.wrappers.env_to_mtenv import EnvToMTEnv
TaskStateType = List[int]
ActionType = int
EnvObsType = Dict[str, Union[int, List[int], List[float]]]
ObsType = Dict[str, Union[EnvObsType, TaskObsType]]
StepReturnType = Tuple[ObsType, RewardType, DoneType, InfoType]
class MTMiniWorldEnv(EnvToMTEnv):
def make_observation(self, env_obs: EnvObsType) -> ObsType:
raise NotImplementedError
def get_task_obs(self) -> TaskObsType:
return self.env.get_task_obs()
def get_task_state(self) -> TaskStateType:
return self.env.task_state
def set_task_state(self, task_state: TaskStateType) -> None:
self.env.set_task_state(task_state)
def sample_task_state(self) -> TaskStateType:
return self.env.sample_task_state()
def reset(self, **kwargs: Dict[str, Any]) -> ObsType: # type: ignore[override]
# signature is incompatible with supertype.
self.assert_env_seed_is_set()
return self.env.reset(**kwargs)
def step(self, action: ActionType) -> StepReturnType: # type: ignore
return self.env.step(action)
def assert_env_seed_is_set(self) -> None:
assert self.env.np_random_env is not None, "please call `seed()` first"
def assert_task_seed_is_set(self) -> None:
assert self.env.np_random_task is not None, "please call `seed_task()` first"
def seed(self, seed: Optional[int] = None) -> List[int]:
"""Set the seed for environment observations"""
return self.env.seed(seed=seed)
class TwoGoalMazeEnv(MiniWorldEnv):
metadata = {"render.modes": ["human", "rgb_array"], "video.frames_per_second": 30}
def __init__(
self,
size_x=5,
size_y=5,
obs_type="xy",
task_seed=0,
n_tasks=10,
p_change=0.0,
empty_mu=False,
):
assert p_change == 0.0
self.empty_mu = empty_mu
self.obs_type = obs_type
self.seed_task(seed=task_seed)
self.np_random_env: Optional[RandomState] = None
self.size_x, self.size_y = size_x, size_y
self.task_state = []
super().__init__()
# Allow only movement actions (left/right/forward)
self.action_space = spaces.Discrete(self.actions.move_forward + 1)
if self.obs_type == "xy":
_obs_space = BoxSpace(
low=-np.inf, high=np.inf, shape=(4,), dtype=np.float32
)
else:
_obs_space = BoxSpace(
low=-1.0,
high=1.0,
shape=(64, 64),
dtype=np.float32,
)
self.observation_space = DictSpace(
{
"obs": _obs_space,
"total_reward": BoxSpace(
low=-np.inf, high=np.inf, shape=(1,), dtype=np.float32
),
}
)
def assert_env_seed_is_set(self) -> None:
"""Check that the env seed is set."""
assert self.np_random_env is not None, "please call `seed()` first"
def assert_task_seed_is_set(self) -> None:
"""Check that the task seed is set."""
assert self.np_random_task is not None, "please call `seed_task()` first"
def seed_task(self, seed: Optional[int] = None) -> List[int]:
"""Set the seed for task information"""
self.np_random_task, seed = seeding.np_random(seed)
assert isinstance(seed, int)
return [seed]
def sample_task_state(self) -> TaskStateType:
self.assert_task_seed_is_set()
return [self.np_random_task.randint(2)]
def set_task_state(self, task_state: TaskStateType) -> None:
self.task_state = task_state
def _gen_world(self):
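        # Central room surrounded by four texture-coded rooms; the left room's texture depends on the task, and the blue/red boxes act as goal and distractor.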
self.reset_task_state()
room1 = self.add_rect_room(
min_x=-self.size_x,
max_x=self.size_x,
min_z=-self.size_y,
max_z=self.size_y,
wall_tex="brick_wall",
)
self.room1 = room1
room2 = self.add_rect_room(
min_x=-self.size_x,
max_x=self.size_x,
min_z=self.size_y,
max_z=self.size_y + 1,
wall_tex="cardboard",
)
self.connect_rooms(room1, room2, min_x=-self.size_x, max_x=self.size_x)
room3 = self.add_rect_room(
min_x=-self.size_x,
max_x=self.size_x,
min_z=-self.size_y - 1,
max_z=-self.size_y,
wall_tex="lava",
)
self.connect_rooms(room1, room3, min_x=-self.size_x, max_x=self.size_x)
room4 = None
if self.task_state[0] == 0:
room4 = self.add_rect_room(
min_x=-self.size_x - 1,
max_x=-self.size_x,
min_z=-self.size_y,
max_z=self.size_y,
wall_tex="wood_planks",
)
else:
room4 = self.add_rect_room(
min_x=-self.size_x - 1,
max_x=-self.size_x,
min_z=-self.size_y,
max_z=self.size_y,
wall_tex="slime",
)
self.connect_rooms(room1, room4, min_z=-self.size_y, max_z=self.size_y)
room5 = self.add_rect_room(
min_x=self.size_x,
max_x=self.size_x + 1,
min_z=-self.size_y,
max_z=self.size_y,
wall_tex="metal_grill",
)
self.connect_rooms(room1, room5, min_z=-self.size_y, max_z=self.size_y)
self.boxes = []
self.boxes.append(Box(color="blue"))
self.boxes.append(Box(color="red"))
self.place_entity(self.boxes[0], room=room1)
self.place_entity(self.boxes[1], room=room1)
# Choose a random room and position to spawn at
_dir = self.np_random_env.randint(8) * (math.pi / 4) - math.pi
self.place_agent(
dir=_dir,
room=room1,
)
while self._dist() < 2 or self._ndist() < 2:
self.place_agent(
dir=_dir,
room=room1,
)
def _dist(self):
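        # Distance from the agent to the goal box (the one indexed by the task state).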
bp = self.boxes[int(self.task_state[0])].pos
pos = self.agent.pos
distance = math.sqrt((bp[0] - pos[0]) ** 2 + (bp[2] - pos[2]) ** 2)
return distance
def _ndist(self):
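        # Distance from the agent to the other (non-goal) box.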
bp = self.boxes[1 - int(self.task_state[0])].pos
pos = self.agent.pos
distance = math.sqrt((bp[0] - pos[0]) ** 2 + (bp[2] - pos[2]) ** 2)
return distance
def reset(self) -> ObsType:
self.assert_env_seed_is_set()
self.max_episode_steps = 200
self.treward = 0.0
self.step_count = 0
self.agent = Agent()
self.entities: List[Any] = []
self.rooms: List[Any] = []
self.wall_segs: List[Any] = []
self._gen_world()
self.blocked = False
rand = self.rand if self.domain_rand else None
self.params.sample_many(
rand, self, ["sky_color", "light_pos", "light_color", "light_ambient"]
)
for ent in self.entities:
ent.randomize(self.params, rand)
# Compute the min and max x, z extents of the whole floorplan
self.min_x = min(r.min_x for r in self.rooms)
self.max_x = max(r.max_x for r in self.rooms)
self.min_z = min(r.min_z for r in self.rooms)
self.max_z = max(r.max_z for r in self.rooms)
# Generate static data
if len(self.wall_segs) == 0:
self._gen_static_data()
# Pre-compile static parts of the environment into a display list
self._render_static()
_pos = [
(self.agent.pos[0] / self.size_x) * 2.1 - 1.0,
(self.agent.pos[2] / self.size_y) * 2.1 - 1.0,
]
_dir = [self.agent.dir_vec[0], self.agent.dir_vec[2]]
if self.obs_type == "xy":
_mu = [0.0]
at = math.atan2(_dir[0], _dir[1])
o = copy.deepcopy(_pos + [at] + _mu)
else:
o = (self.render_obs() / 255.0) * 2.0 - 1.0
return self.make_obs(env_obs=o, total_reward=[0.0])
def get_task_obs(self) -> TaskObsType:
mmu = copy.deepcopy(self.task_state)
if self.empty_mu:
mmu = [0.0]
return mmu
def get_task_state(self) -> TaskStateType:
return self.task_state
def reset_task_state(self) -> None:
"""Sample a new task_state and set that as the new task_state"""
self.set_task_state(task_state=self.sample_task_state())
def make_obs(self, env_obs: Any, total_reward: List[float]) -> ObsType:
return {
"env_obs": {"obs": env_obs, "total_reward": total_reward},
"task_obs": self.get_task_obs(),
}
def seed(self, seed: Optional[int] = None) -> List[int]:
"""Set the seed for environment observations"""
self.np_random_env, seed = seeding.np_random(seed)
return [seed] + super().seed(seed=seed)
def step(self, action: ActionType) -> StepReturnType:
self.step_count += 1
if not self.blocked:
if action == 2:
self.move_agent(0.51, 0.0) # fwd_step, fwd_drift)
elif action == 0:
self.turn_agent(45)
elif action == 1:
self.turn_agent(-45)
reward = 0.0
done = False
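        # +1 and terminate when the agent reaches the goal box; -1 when it reaches the distractor.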
distance = self._dist()
if distance < 2:
reward = +1.0
done = True
distance = self._ndist()
if distance < 2:
reward = -1.0
done = True
_pos = [
(self.agent.pos[0] / self.size_x) * 2.1 - 1.0,
(self.agent.pos[2] / self.size_y) * 2.1 - 1.0,
]
_dir = [self.agent.dir_vec[0], self.agent.dir_vec[2]]
if self.obs_type == "xy":
at = math.atan2(_dir[0], _dir[1])
_mu = [0.0]
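            # The goal identity (+1 / -1) is revealed only while the agent faces a narrow heading window.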
if (at < -1.5 and at > -1.7) and not self.empty_mu:
_mu = [1.0]
if self.task_state[0] == 0:
_mu = [-1.0]
o = copy.deepcopy(_pos + [at] + _mu)
else:
o = (self.render_obs() / 255.0) * 2.0 - 1.0
self.treward += reward
return self.make_obs(env_obs=o, total_reward=[self.treward]), reward, done, {}
def build_two_goal_maze_env(size_x: int, size_y: int, task_seed: int, n_tasks: int):
env = MTMiniWorldEnv(
TwoGoalMazeEnv(
size_x=size_x, size_y=size_y, task_seed=task_seed, n_tasks=n_tasks
),
task_observation_space=DiscreteSpace(n=1),
)
return env
| 11,245 | 31.69186 | 122 | py |
mtenv | mtenv-main/mtenv/envs/mpte/__init__.py | 0 | 0 | 0 | py |
|
mtenv | mtenv-main/mtenv/envs/tabular_mdp/tmdp.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import scipy.special
from gym import spaces
from gym.utils import seeding
from mtenv import MTEnv
class TMDP(MTEnv):
"""Defines a Tabuular MDP where task_state is the reward matrix,transition matrix
reward_matrix is n_states*n_actions and gies the probability of having a reward = +1 when choosing action a in state s (matrix[s,a])
transition_matrix is n_states*n_actions*n_states and gives the probability of moving to state s' when choosing action a in state s (matrix[s,a,s'])
Args:
MTEnv ([type]): [description]
"""
def __init__(self, n_states, n_actions):
self.n_states = n_states
self.n_actions = n_actions
ohigh = np.array([1.0 for n in range(n_states + 1)])
olow = np.array([0.0 for n in range(n_states + 1)])
observation_space = spaces.Box(olow, ohigh, dtype=np.float32)
action_space = spaces.Discrete(n_actions)
self.task_state = (
np.zeros((n_states, n_actions)),
np.zeros((n_states, n_actions, n_states)),
)
o = self.get_task_obs()
thigh = np.ones((len(o),))
tlow = np.zeros((len(o),))
task_space = spaces.Box(tlow, thigh, dtype=np.float32)
super().__init__(
action_space=action_space,
env_observation_space=observation_space,
task_observation_space=task_space,
)
# task state is the reward matrix and transition matrix
def get_task_obs(self):
obs = list(self.task_state[0].flatten()) + list(self.task_state[1].flatten())
return obs
def get_task_state(self):
return self.task_state
def set_task_state(self, task_state):
self.task_state = task_state
def sample_task_state(self):
raise NotImplementedError
def seed(self, env_seed):
self.np_random_env, seed = seeding.np_random(env_seed)
return [seed]
def seed_task(self, task_seed):
self.np_random_task, seed = seeding.np_random(task_seed)
return [seed]
def step(self, action):
t_reward, t_matrix = self.task_state
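        # Bernoulli reward with success probability t_reward[s, a]; the next state is drawn from the categorical distribution t_matrix[s, a].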
reward = 0.0
if self.np_random_env.rand() < t_reward[self.state][action]:
reward = 1.0
self.state = self.np_random_env.multinomial(
1, t_matrix[self.state][action]
).argmax()
obs = np.zeros(self.n_states + 1)
obs[self.state] = 1.0
obs[-1] = reward
return (
{"env_obs": list(obs), "task_obs": self.get_task_obs()},
reward,
False,
{},
)
def reset(self):
self.state = self.np_random_env.randint(self.n_states)
obs = np.zeros(self.n_states + 1)
obs[self.state] = 1.0
return {"env_obs": list(obs), "task_obs": self.get_task_obs()}
class UniformTMDP(TMDP):
def __init__(self, n_states, n_actions):
super().__init__(n_states, n_actions)
def sample_task_state(self):
self.assert_task_seed_is_set()
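        # Reward probabilities ~ Uniform[0, 1); transition logits ~ N(0, 1), turned into distributions by a softmax over next states.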
t_reward = self.np_random_task.rand(self.n_states, self.n_actions)
t_transitions = self.np_random_task.randn(
self.n_states, self.n_actions, self.n_states
)
t_transitions = scipy.special.softmax(t_transitions, axis=2)
new_task_state = t_reward, t_transitions
return new_task_state
if __name__ == "__main__":
env = UniformTMDP(3, 2)
env.seed(5)
env.seed_task(14)
env.reset_task_state()
obs = env.reset()
done = False
while not done:
action = np.random.randint(env.action_space.n)
obs, rew, done, _ = env.step(action)
print(obs["env_obs"])
| 3,884 | 30.844262 | 155 | py |
mtenv | mtenv-main/mtenv/envs/tabular_mdp/setup.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from pathlib import Path
import setuptools
from mtenv.utils.setup_utils import parse_dependency
env_name = "tabular_mdp"
path = Path(__file__).parent / "requirements.txt"
requirements = parse_dependency(path)
setuptools.setup(
name=env_name,
version="1.0.0",
install_requires=requirements,
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.6",
)
| 726 | 25.925926 | 70 | py |
mtenv | mtenv-main/mtenv/envs/tabular_mdp/__init__.py | 0 | 0 | 0 | py |
|
mtenv | mtenv-main/mtenv/wrappers/sample_random_task.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""Wrapper that samples a new task everytime the environment is reset."""
from mtenv import MTEnv
from mtenv.utils.types import ObsType
from mtenv.wrappers.multitask import MultiTask
class SampleRandomTask(MultiTask):
def __init__(self, env: MTEnv):
"""Wrapper that samples a new task everytime the environment is
reset.
Args:
env (MTEnv): Multitask environment to wrap over.
"""
super().__init__(env=env)
def reset(self) -> ObsType:
self.env.reset_task_state()
return self.env.reset()
| 639 | 26.826087 | 73 | py |
mtenv | mtenv-main/mtenv/wrappers/ntasks_id.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""Wrapper to fix the number of tasks in an existing multitask environment
and return the id of the task as part of the observation."""
from gym.spaces import Dict as DictSpace
from gym.spaces import Discrete
from mtenv import MTEnv
from mtenv.utils.types import ActionType, ObsType, StepReturnType, TaskStateType
from mtenv.wrappers.ntasks import NTasks
class NTasksId(NTasks):
def __init__(self, env: MTEnv, n_tasks: int):
"""Wrapper to fix the number of tasks in an existing multitask
environment to `n_tasks`.
Each task is sampled in this fixed set of `n_tasks`. The agent
observes the id of the task.
Args:
env (MTEnv): Multitask environment to wrap over.
n_tasks (int): Number of tasks to sample.
"""
self.env = env
super().__init__(n_tasks=n_tasks, env=env)
self.task_state: TaskStateType
self.observation_space: DictSpace = DictSpace(
spaces={
"env_obs": self.observation_space["env_obs"],
"task_obs": Discrete(n_tasks),
}
)
def _update_obs(self, obs: ObsType) -> ObsType:
obs["task_obs"] = self.get_task_obs()
return obs
def step(self, action: ActionType) -> StepReturnType:
obs, reward, done, info = self.env.step(action)
return self._update_obs(obs), reward, done, info
def get_task_obs(self) -> TaskStateType:
return self.task_state
def get_task_state(self) -> TaskStateType:
return self.task_state
def set_task_state(self, task_state: TaskStateType) -> None:
self.env.set_task_state(self.tasks[task_state])
self.task_state = task_state
def reset(self) -> ObsType:
obs = self.env.reset()
return self._update_obs(obs)
def sample_task_state(self) -> TaskStateType:
self.assert_task_seed_is_set()
if not self._are_tasks_set:
self.tasks = [self.env.sample_task_state() for _ in range(self.n_tasks)]
self._are_tasks_set = True
# The assert statement (at the start of the function) ensures that self.np_random_task
# is not None. Mypy is raising the warning incorrectly.
id_task = self.np_random_task.randint(self.n_tasks) # type: ignore[union-attr]
return id_task
| 2,410 | 34.455882 | 94 | py |
mtenv | mtenv-main/mtenv/wrappers/multitask.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""Wrapper to change the behaviour of an existing multitask environment."""
from typing import List, Optional
from numpy.random import RandomState
from mtenv import MTEnv
from mtenv.utils import seeding
from mtenv.utils.types import (
ActionType,
ObsType,
StepReturnType,
TaskObsType,
TaskStateType,
)
class MultiTask(MTEnv):
def __init__(self, env: MTEnv):
"""Wrapper to change the behaviour of an existing multitask environment
Args:
env (MTEnv): Multitask environment to wrap over.
"""
self.env = env
self.observation_space = self.env.observation_space
self.action_space = self.env.action_space
self.np_random_env: Optional[RandomState] = None
self.np_random_task: Optional[RandomState] = None
def step(self, action: ActionType) -> StepReturnType:
return self.env.step(action)
def get_task_obs(self) -> TaskObsType:
return self.env.get_task_obs()
def get_task_state(self) -> TaskStateType:
return self.env.get_task_state()
def set_task_state(self, task_state: TaskStateType) -> None:
self.env.set_task_state(task_state)
def assert_env_seed_is_set(self) -> None:
"""Check that the env seed is set."""
assert self.np_random_env is not None, "please call `seed()` first"
self.env.assert_env_seed_is_set()
def assert_task_seed_is_set(self) -> None:
"""Check that the task seed is set."""
assert self.np_random_task is not None, "please call `seed_task()` first"
self.env.assert_task_seed_is_set()
def reset(self) -> ObsType:
return self.env.reset()
def sample_task_state(self) -> TaskStateType:
return self.env.sample_task_state()
def reset_task_state(self) -> None:
self.env.reset_task_state()
def seed(self, seed: Optional[int] = None) -> List[int]:
self.np_random_env, seed = seeding.np_random(seed)
return [seed] + self.env.seed(seed)
def seed_task(self, seed: Optional[int] = None) -> List[int]:
self.np_random_task, seed = seeding.np_random(seed)
return [seed] + self.env.seed_task(seed)
| 2,261 | 31.314286 | 81 | py |
mtenv | mtenv-main/mtenv/wrappers/ntasks.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""Wrapper to fix the number of tasks in an existing multitask environment."""
from typing import List
from mtenv import MTEnv
from mtenv.utils.types import TaskStateType
from mtenv.wrappers.multitask import MultiTask
class NTasks(MultiTask):
def __init__(self, env: MTEnv, n_tasks: int):
"""Wrapper to fix the number of tasks in an existing multitask
environment to `n_tasks`.
Each task is sampled in this fixed set of `n_tasks`.
Args:
env (MTEnv): Multitask environment to wrap over.
n_tasks (int): Number of tasks to sample.
"""
super().__init__(env=env)
self.n_tasks = n_tasks
self.tasks: List[TaskStateType]
self._are_tasks_set = False
def sample_task_state(self) -> TaskStateType:
"""Sample a `task_state` from the set of `n_tasks` tasks.
`task_state` contains all the information that the environment
needs to switch to any other task.
The subclasses, extending this class, should ensure that the task
        seed is set (by calling `seed_task(int)`) before invoking this
method (for reproducibility). It can be done by invoking
`self.assert_task_seed_is_set()`.
Returns:
TaskStateType: For more information on `task_state`,
refer :ref:`task_state`.
"""
self.assert_task_seed_is_set()
if not self._are_tasks_set:
self.tasks = [self.env.sample_task_state() for _ in range(self.n_tasks)]
self._are_tasks_set = True
# The assert statement (at the start of the function) ensures that self.np_random_task
# is not None. Mypy is raising the warning incorrectly.
id_task = self.np_random_task.randint(self.n_tasks) # type: ignore[union-attr]
return self.tasks[id_task]
def reset_task_state(self) -> None:
"""Sample a new task_state from the set of `n_tasks` tasks and
set the environment to that `task_state`.
For more information on `task_state`, refer :ref:`task_state`.
"""
self.set_task_state(task_state=self.sample_task_state())
| 2,223 | 36.694915 | 94 | py |
mtenv | mtenv-main/mtenv/wrappers/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from mtenv.wrappers.ntasks import NTasks # noqa: F401
from mtenv.wrappers.ntasks_id import NTasksId # noqa: F401
from mtenv.wrappers.sample_random_task import SampleRandomTask # noqa: F401
| 263 | 51.8 | 76 | py |
mtenv | mtenv-main/mtenv/wrappers/env_to_mtenv.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""Wrapper to convert an environment into multitask environment."""
from typing import Any, Dict, List, Optional
from gym.core import Env
from gym.spaces.space import Space
from mtenv import MTEnv
from mtenv.utils import seeding
from mtenv.utils.types import (
ActionType,
EnvObsType,
ObsType,
StepReturnType,
TaskObsType,
TaskStateType,
)
class EnvToMTEnv(MTEnv):
def __init__(self, env: Env, task_observation_space: Space) -> None:
"""Wrapper to convert an environment into a multitak environment.
Args:
env (Env): Environment to wrap over.
task_observation_space (Space): Task observation space for the
resulting multitask environment.
"""
super().__init__(
action_space=env.action_space,
env_observation_space=env.observation_space,
task_observation_space=task_observation_space,
)
self.env = env
self.reward_range = self.env.reward_range
self.metadata = self.env.metadata
@property
def spec(self) -> Any:
return self.env.spec
@classmethod
def class_name(cls) -> str:
return cls.__name__
def _make_observation(self, env_obs: EnvObsType) -> ObsType:
return {"env_obs": env_obs, "task_obs": self.get_task_obs()}
def get_task_obs(self) -> TaskObsType:
return self._task_obs
def get_task_state(self) -> TaskStateType:
raise NotImplementedError
def set_task_state(self, task_state: TaskStateType) -> None:
raise NotImplementedError
def sample_task_state(self) -> TaskStateType:
raise NotImplementedError
def reset(self, **kwargs: Dict[str, Any]) -> ObsType:
self.assert_env_seed_is_set()
env_obs = self.env.reset(**kwargs)
return self._make_observation(env_obs=env_obs)
def reset_task_state(self) -> None:
self.set_task_state(task_state=self.sample_task_state())
def step(self, action: ActionType) -> StepReturnType:
env_obs, reward, done, info = self.env.step(action)
return (
self._make_observation(env_obs=env_obs),
reward,
done,
info,
)
def seed(self, seed: Optional[int] = None) -> List[int]:
self.np_random_env, seed = seeding.np_random(seed)
env_seeds = self.env.seed(seed)
if isinstance(env_seeds, list):
return [seed] + env_seeds
return [seed]
def render(self, mode: str = "human", **kwargs: Dict[str, Any]) -> Any:
"""Renders the environment."""
return self.env.render(mode, **kwargs)
def close(self) -> Any:
return self.env.close()
def __str__(self) -> str:
return f"{type(self).__name__}{self.env}"
def __repr__(self) -> str:
return str(self)
@property
def unwrapped(self) -> Env:
return self.env.unwrapped
def __getattr__(self, name: str) -> Any:
if name.startswith("_"):
raise AttributeError(
"attempted to get missing private attribute '{}'".format(name)
)
return getattr(self.env, name)
| 3,253 | 28.581818 | 78 | py |
mtenv | mtenv-main/mtenv/utils/setup_utils.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from pathlib import Path
from typing import List
def parse_dependency(filepath: Path) -> List[str]:
"""Parse python dependencies from a file.
    The list of dependencies is used by `setup.py` files. Lines starting
    with "#" are ignored (useful for writing comments). In case the
    dependency is hosted using git, the url is parsed and modified to make
    it suitable for `setup.py` files.
Args:
filepath (Path):
Returns:
List[str]: List of dependencies
"""
dep_list = []
for dep in open(filepath).read().splitlines():
if dep.startswith("#"):
continue
key = "#egg="
if key in dep:
git_link, egg_name = dep.split(key)
dep = f"{egg_name} @ {git_link}"
dep_list.append(dep)
return dep_list
| 877 | 27.322581 | 72 | py |