nguyenduchuyiu committed
Commit cfb974a · Parent(s): 1391cd4

Initial commit

README.md CHANGED
@@ -16,7 +16,7 @@ model-index:
       type: SpaceInvadersNoFrameskip-v4
     metrics:
     - type: mean_reward
-      value: 587.00 +/- 118.37
+      value: 682.00 +/- 137.10
       name: mean_reward
       verified: false
 ---
@@ -63,7 +63,7 @@ python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f lo
 ## Hyperparameters
 ```python
 OrderedDict([('batch_size', 32),
-             ('buffer_size', 10000),
+             ('buffer_size', 100000),
              ('env_wrapper',
               ['stable_baselines3.common.atari_wrappers.AtariWrapper']),
              ('exploration_final_eps', 0.01),
@@ -72,8 +72,8 @@ OrderedDict([('batch_size', 32),
              ('gradient_steps', 1),
              ('learning_rate', 0.0001),
              ('learning_starts', 100000),
-             ('n_timesteps', 10000000.0),
-             ('optimize_memory_usage', True),
+             ('n_timesteps', 1000000.0),
+             ('optimize_memory_usage', False),
              ('policy', 'CnnPolicy'),
              ('target_update_interval', 1000),
              ('train_freq', 4),
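
The hyperparameter block above is the RL Zoo3 configuration that Stable-Baselines3 trains from; the zoo assembles the model automatically from dqn.yml. As a rough illustration only, a manual setup with the updated values might look like the sketch below. Anything not listed in the diff is left at SB3 defaults, which is an assumption, and the zoo's additional Atari settings (such as frame stacking) are omitted here.

```python
# Minimal sketch (not the zoo's actual training path): DQN with the
# hyperparameters shown in the updated README/config. Anything not listed
# in the diff is left at Stable-Baselines3 defaults, which is an assumption.
from stable_baselines3 import DQN
from stable_baselines3.common.env_util import make_atari_env

# make_atari_env applies the same AtariWrapper named under env_wrapper.
env = make_atari_env("SpaceInvadersNoFrameskip-v4", n_envs=1, seed=0)

model = DQN(
    "CnnPolicy",
    env,
    batch_size=32,
    buffer_size=100_000,
    exploration_final_eps=0.01,
    gradient_steps=1,
    learning_rate=1e-4,
    learning_starts=100_000,
    optimize_memory_usage=False,
    target_update_interval=1000,
    train_freq=4,
    verbose=1,
)
model.learn(total_timesteps=1_000_000)  # n_timesteps from the new config
```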
args.yml CHANGED
@@ -1,24 +1,34 @@
 !!python/object/apply:collections.OrderedDict
 - - - algo
     - dqn
+  - - conf_file
+    - dqn.yml
+  - - device
+    - auto
   - - env
     - SpaceInvadersNoFrameskip-v4
   - - env_kwargs
     - null
+  - - eval_env_kwargs
+    - null
   - - eval_episodes
-    - 10
+    - 5
   - - eval_freq
-    - 10000
+    - 25000
   - - gym_packages
     - []
   - - hyperparams
     - null
   - - log_folder
-    - rl-trained-agents/
+    - logs/
   - - log_interval
     - -1
+  - - max_total_trials
+    - null
+  - - n_eval_envs
+    - 1
   - - n_evaluations
-    - 20
+    - null
   - - n_jobs
     - 1
   - - n_startup_trials
@@ -26,11 +36,17 @@
   - - n_timesteps
     - -1
   - - n_trials
-    - 10
+    - 500
+  - - no_optim_plots
+    - false
   - - num_threads
     - -1
+  - - optimization_log_path
+    - null
   - - optimize_hyperparameters
     - false
+  - - progress
+    - true
   - - pruner
     - median
   - - sampler
@@ -40,20 +56,28 @@
   - - save_replay_buffer
     - false
   - - seed
-    - 234163638
+    - 2528224499
   - - storage
     - null
   - - study_name
     - null
   - - tensorboard_log
     - ''
+  - - track
+    - false
   - - trained_agent
     - ''
   - - truncate_last_trajectory
     - true
   - - uuid
-    - true
+    - false
   - - vec_env
     - dummy
   - - verbose
     - 1
+  - - wandb_entity
+    - null
+  - - wandb_project_name
+    - sb3
+  - - wandb_tags
+    - []
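
args.yml above is a PyYAML dump of the training CLI arguments and uses a Python-specific tag (!!python/object/apply:collections.OrderedDict), so yaml.safe_load will reject it. A minimal sketch for inspecting it, assuming the file is read from this repository and that you trust its contents:

```python
# Minimal sketch: load args.yml back into a dict for inspection.
# The !!python/object/apply tag requires the unsafe loader; only use it
# for files you trust (e.g. your own training runs).
import yaml

with open("args.yml") as f:
    args = dict(yaml.load(f, Loader=yaml.UnsafeLoader))

print(args["eval_freq"], args["eval_episodes"], args["seed"])
# Expected with the new file: 25000 5 2528224499
```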
config.yml CHANGED
@@ -2,7 +2,7 @@
 - - - batch_size
     - 32
   - - buffer_size
-    - 10000
+    - 100000
   - - env_wrapper
     - - stable_baselines3.common.atari_wrappers.AtariWrapper
   - - exploration_final_eps
@@ -18,9 +18,9 @@
   - - learning_starts
     - 100000
   - - n_timesteps
-    - 10000000.0
+    - 1000000.0
   - - optimize_memory_usage
-    - true
+    - false
   - - policy
     - CnnPolicy
   - - target_update_interval
dqn-SpaceInvadersNoFrameskip-v4.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:740e4388373f286a49d5e3bd59711bb1bc7df80fc3e400e8bff4c5b88ebf8cb1
-size 27219385
+oid sha256:5ebb27988162d1b41f530fad10131e9b4ad3735b6c79d9fe7e91af91bdb979a8
+size 27221655
dqn-SpaceInvadersNoFrameskip-v4/_stable_baselines3_version CHANGED
@@ -1 +1 @@
-2.4.0a7
+2.3.2
dqn-SpaceInvadersNoFrameskip-v4/data CHANGED
The diff for this file is too large to render. See raw diff
 
dqn-SpaceInvadersNoFrameskip-v4/policy.optimizer.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1c84b8e885044f7a519b5f2e8d11b729cff094f707153dfc4b355bb7912b4680
-size 13506108
+oid sha256:52d4ed6880a23202b0a9e4aca6a3f5367492e716906f30abf3973f84c46fc726
+size 13506236
dqn-SpaceInvadersNoFrameskip-v4/policy.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:49caa05efa450005dbf9dfb9798c8cbf0e826a937dc985ccbe496e339a219598
+oid sha256:6ba5ac28b22e410b620e382d694643f009185ec3c691b1f3cc4908c2c5242bbf
 size 13505370
dqn-SpaceInvadersNoFrameskip-v4/system_info.txt CHANGED
@@ -1,9 +1,9 @@
-- OS: Linux-6.1.85+-x86_64-with-glibc2.35 # 1 SMP PREEMPT_DYNAMIC Thu Jun 27 21:05:47 UTC 2024
-- Python: 3.10.12
-- Stable-Baselines3: 2.4.0a7
-- PyTorch: 2.3.1+cu121
+- OS: Linux-6.8.0-39-generic-x86_64-with-glibc2.39 # 39-Ubuntu SMP PREEMPT_DYNAMIC Fri Jul 5 21:49:14 UTC 2024
+- Python: 3.10.0
+- Stable-Baselines3: 2.3.2
+- PyTorch: 2.4.0+cu124
 - GPU Enabled: True
-- Numpy: 1.26.4
-- Cloudpickle: 2.2.1
+- Numpy: 2.0.1
+- Cloudpickle: 3.0.0
 - Gymnasium: 0.29.1
-- OpenAI Gym: 0.25.2
+- OpenAI Gym: 0.26.2
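
A report like the system_info.txt contents above (OS, Python, SB3, PyTorch, GPU, Numpy, and related versions) can be produced on the training machine with Stable-Baselines3's own helper; the sketch below simply prints that report and makes no claim about how this particular file was generated.

```python
# Minimal sketch: print the same kind of environment report that
# system_info.txt contains.
from stable_baselines3.common.utils import get_system_info

# Returns (dict_of_fields, formatted_string); print_info=True also prints it.
env_info, env_info_str = get_system_info(print_info=True)
```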
replay.mp4 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0fdf2840313331c8be0643895de920edf3f2f670e72b731e2e246bb708ca8cc3
-size 234832
+oid sha256:d105169af6c09796da40e11bbf22fcebc0301d5316ed2c1c7afaaed894a86210
+size 224791
results.json CHANGED
@@ -1 +1 @@
-{"mean_reward": 587.0, "std_reward": 118.36807001890332, "is_deterministic": false, "n_eval_episodes": 10, "eval_datetime": "2024-07-30T17:14:18.274286"}
+{"mean_reward": 682.0, "std_reward": 137.09850473291092, "is_deterministic": false, "n_eval_episodes": 10, "eval_datetime": "2024-07-31T22:09:45.206036"}
train_eval_metrics.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8c33b3588fd72a96f8eaa830128da81e2b3c48c19ef89efc70beed87c4e42626
-size 446800
+oid sha256:8a874b96cf29fb220e7c1ba59192505e2c1089fa529fe0a4baf20adebc9d21f6
+size 35810