import copy
import os
import pickle
import time

import gym
import numpy as np
import torch

# ReplayMemory, DiffusionMemory, QVPO and readParser are project-local; their
# import paths are not shown in this snippet.


def evaluate(env, agent, steps, render=False, eval_episodes=10):
    # The function header, episode loop and `returns` buffer are reconstructed from
    # the call site in main(); the number of evaluation episodes is an assumption.
    returns = np.zeros(eval_episodes)
    for i in range(eval_episodes):
        state = env.reset()
        episode_reward = 0.
        done = False
        while not done:
            if render:
                env.render()
                time.sleep(0.01)
            action = agent.sample_action(state, eval=True)
            next_state, reward, done, _ = env.step(action)
            episode_reward += reward
            state = next_state
        print(episode_reward)
        returns[i] = episode_reward

    mean_return = np.mean(returns)
    std_return = np.std(returns)

    print('-' * 60)
    print(f'Num steps: {steps:<5} '
          f'reward: {mean_return:<5.1f} '
          f'std: {std_return:<5.1f}')
    print(returns)
    print('-' * 60)
    return mean_return

def main(args=None):
    device = torch.device(args.cuda)

    dir = "record"
    # dir = "test"
    log_dir = os.path.join(dir, f'{args.env_name}', f'policy_type={args.policy_type}',
                           f'ratio={args.ratio}', f'seed={args.seed}')

    # Initialize environment
    env = gym.make(args.env_name)
    eval_env = copy.deepcopy(env)
    state_size = int(np.prod(env.observation_space.shape))
    action_size = int(np.prod(env.action_space.shape))
    print(action_size)

    # Set random seeds
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    env.seed(args.seed)
    eval_env.seed(args.seed)

    memory_size = 1e6
    num_steps = args.num_steps
    start_steps = 10000
    eval_interval = 10000
    updates_per_step = 1
    batch_size = args.batch_size
    log_interval = 10

    memory = ReplayMemory(state_size, action_size, memory_size, device)
    diffusion_memory = DiffusionMemory(state_size, action_size, memory_size, device)

    agent = QVPO(args, state_size, env.action_space, memory, diffusion_memory, device)
    agent.load_model(os.path.join('./results', prefix + '_' + name), id=args.id)

    # Print the saved training configuration, if one was stored with the checkpoint.
    config_path = os.path.join('./results', prefix + '_' + name, 'config_' + args.id[:-2] + '.pkl')
    if os.path.exists(config_path):
        with open(config_path, 'rb') as f:
            conf = pickle.load(f)
        for k, v in conf._get_kwargs():
            print(f"{k}: {v}")

    steps = 0
    episodes = 0
    best_result = 0

    # The training loop that originally advanced `steps` is not included in this
    # snippet, so only the evaluation at steps == 0 runs here.
    if steps % eval_interval == 0:
        evaluate(eval_env, agent, steps, args.render)

if __name__ == "__main__":
    args = readParser()
    if args.target_sample == -1:
        args.target_sample = args.behavior_sample

    ## settings
    prefix = 'qvpo'
    name = args.env_name
    keys = ("epoch", "reward")
    times = args.times

    ## run
    for t in range(times):
        main(args)
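# Example invocation (a sketch only: the exact flag names defined by readParser are
# not shown in this snippet, and the values below are purely illustrative):
#
#   python main.py --env_name Hopper-v3 --seed 0 --num_steps 1000000 \
#       --batch_size 256 --cuda cuda:0 --times 1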
# <FILESEP>
import json
import requests
import time
import datetime
from collections import defaultdict


class Webhook:

    def __init__(self, url, **kwargs):