dvesely committed
Commit 9f2f611 · Parent: c495516

Initial commit

.gitattributes CHANGED
@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ replay.mp4 filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -16,7 +16,7 @@ model-index:
    type: AntBulletEnv-v0
  metrics:
  - type: mean_reward
-   value: 36.60 +/- 32.03
+   value: 2043.59 +/- 93.93
    name: mean_reward
    verified: false
  ---
a2c-AntBulletEnv-v0.zip CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4c13711427ec9abd0aaf4310eb12fa3ceb9db39344fec85b4403c6d89b4893be
- size 67336
+ oid sha256:1c6c99ba28f85ed1a4bcf1bcf9a939985d8394e640f13ab97f3070d39753438a
+ size 129247
a2c-AntBulletEnv-v0/data CHANGED
@@ -4,20 +4,20 @@
  ":serialized:": "gAWVOwAAAAAAAACMIXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5wb2xpY2llc5SMEUFjdG9yQ3JpdGljUG9saWN5lJOULg==",
  "__module__": "stable_baselines3.common.policies",
  "__doc__": "\n Policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ",
- "__init__": "<function ActorCriticPolicy.__init__ at 0x7f124b6b04c0>",
- "_get_constructor_parameters": "<function ActorCriticPolicy._get_constructor_parameters at 0x7f124b6b0550>",
- "reset_noise": "<function ActorCriticPolicy.reset_noise at 0x7f124b6b05e0>",
- "_build_mlp_extractor": "<function ActorCriticPolicy._build_mlp_extractor at 0x7f124b6b0670>",
- "_build": "<function ActorCriticPolicy._build at 0x7f124b6b0700>",
- "forward": "<function ActorCriticPolicy.forward at 0x7f124b6b0790>",
- "extract_features": "<function ActorCriticPolicy.extract_features at 0x7f124b6b0820>",
- "_get_action_dist_from_latent": "<function ActorCriticPolicy._get_action_dist_from_latent at 0x7f124b6b08b0>",
- "_predict": "<function ActorCriticPolicy._predict at 0x7f124b6b0940>",
- "evaluate_actions": "<function ActorCriticPolicy.evaluate_actions at 0x7f124b6b09d0>",
- "get_distribution": "<function ActorCriticPolicy.get_distribution at 0x7f124b6b0a60>",
- "predict_values": "<function ActorCriticPolicy.predict_values at 0x7f124b6b0af0>",
+ "__init__": "<function ActorCriticPolicy.__init__ at 0x7fe4afd50040>",
+ "_get_constructor_parameters": "<function ActorCriticPolicy._get_constructor_parameters at 0x7fe4afd500d0>",
+ "reset_noise": "<function ActorCriticPolicy.reset_noise at 0x7fe4afd50160>",
+ "_build_mlp_extractor": "<function ActorCriticPolicy._build_mlp_extractor at 0x7fe4afd501f0>",
+ "_build": "<function ActorCriticPolicy._build at 0x7fe4afd50280>",
+ "forward": "<function ActorCriticPolicy.forward at 0x7fe4afd50310>",
+ "extract_features": "<function ActorCriticPolicy.extract_features at 0x7fe4afd503a0>",
+ "_get_action_dist_from_latent": "<function ActorCriticPolicy._get_action_dist_from_latent at 0x7fe4afd50430>",
+ "_predict": "<function ActorCriticPolicy._predict at 0x7fe4afd504c0>",
+ "evaluate_actions": "<function ActorCriticPolicy.evaluate_actions at 0x7fe4afd50550>",
+ "get_distribution": "<function ActorCriticPolicy.get_distribution at 0x7fe4afd505e0>",
+ "predict_values": "<function ActorCriticPolicy.predict_values at 0x7fe4afd50670>",
  "__abstractmethods__": "frozenset()",
- "_abc_impl": "<_abc._abc_data object at 0x7f124b634800>"
+ "_abc_impl": "<_abc._abc_data object at 0x7fe4afd4ff80>"
  },
  "verbose": 1,
  "policy_kwargs": {
@@ -32,29 +32,51 @@
  "weight_decay": 0
  }
  },
- "num_timesteps": 0,
- "_total_timesteps": 0,
+ "num_timesteps": 1000064,
+ "_total_timesteps": 1000000,
  "_num_timesteps_at_start": 0,
  "seed": null,
  "action_noise": null,
- "start_time": null,
- "learning_rate": 0.00096,
+ "start_time": 1681378097059957234,
+ "learning_rate": 0.001,
  "tensorboard_log": null,
  "lr_schedule": {
  ":type:": "<class 'function'>",
- ":serialized:": "gAWVwwIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMSC91c3IvbG9jYWwvbGliL3B5dGhvbjMuOS9kaXN0LXBhY2thZ2VzL3N0YWJsZV9iYXNlbGluZXMzL2NvbW1vbi91dGlscy5weZSMBGZ1bmOUS4JDAgABlIwDdmFslIWUKXSUUpR9lCiMC19fcGFja2FnZV9flIwYc3RhYmxlX2Jhc2VsaW5lczMuY29tbW9ulIwIX19uYW1lX1+UjB5zdGFibGVfYmFzZWxpbmVzMy5jb21tb24udXRpbHOUjAhfX2ZpbGVfX5SMSC91c3IvbG9jYWwvbGliL3B5dGhvbjMuOS9kaXN0LXBhY2thZ2VzL3N0YWJsZV9iYXNlbGluZXMzL2NvbW1vbi91dGlscy5weZR1Tk5oAIwQX21ha2VfZW1wdHlfY2VsbJSTlClSlIWUdJRSlIwcY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEl9mdW5jdGlvbl9zZXRzdGF0ZZSTlGgffZR9lChoFmgNjAxfX3F1YWxuYW1lX1+UjBljb25zdGFudF9mbi48bG9jYWxzPi5mdW5jlIwPX19hbm5vdGF0aW9uc19flH2UjA5fX2t3ZGVmYXVsdHNfX5ROjAxfX2RlZmF1bHRzX1+UTowKX19tb2R1bGVfX5RoF4wHX19kb2NfX5ROjAtfX2Nsb3N1cmVfX5RoAIwKX21ha2VfY2VsbJSTlEc/T3UQTVUdaYWUUpSFlIwXX2Nsb3VkcGlja2xlX3N1Ym1vZHVsZXOUXZSMC19fZ2xvYmFsc19flH2UdYaUhlIwLg=="
+ ":serialized:": "gAWVwwIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMSC91c3IvbG9jYWwvbGliL3B5dGhvbjMuOS9kaXN0LXBhY2thZ2VzL3N0YWJsZV9iYXNlbGluZXMzL2NvbW1vbi91dGlscy5weZSMBGZ1bmOUS4JDAgABlIwDdmFslIWUKXSUUpR9lCiMC19fcGFja2FnZV9flIwYc3RhYmxlX2Jhc2VsaW5lczMuY29tbW9ulIwIX19uYW1lX1+UjB5zdGFibGVfYmFzZWxpbmVzMy5jb21tb24udXRpbHOUjAhfX2ZpbGVfX5SMSC91c3IvbG9jYWwvbGliL3B5dGhvbjMuOS9kaXN0LXBhY2thZ2VzL3N0YWJsZV9iYXNlbGluZXMzL2NvbW1vbi91dGlscy5weZR1Tk5oAIwQX21ha2VfZW1wdHlfY2VsbJSTlClSlIWUdJRSlIwcY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEl9mdW5jdGlvbl9zZXRzdGF0ZZSTlGgffZR9lChoFmgNjAxfX3F1YWxuYW1lX1+UjBljb25zdGFudF9mbi48bG9jYWxzPi5mdW5jlIwPX19hbm5vdGF0aW9uc19flH2UjA5fX2t3ZGVmYXVsdHNfX5ROjAxfX2RlZmF1bHRzX1+UTowKX19tb2R1bGVfX5RoF4wHX19kb2NfX5ROjAtfX2Nsb3N1cmVfX5RoAIwKX19tYWtlX2NlbGyUk5RHP1BiTdLxqfyFlFKUhZSMF19jbG91ZHBpY2tsZV9zdWJtb2R1bGVzlF2UjAtfX2dsb2JhbHNfX5R9lHWGlIZSMC4="
+ },
+ "_last_obs": {
+ ":type:": "<class 'numpy.ndarray'>",
+ ":serialized:": "gAWVNQIAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJbAAQAAAAAAADmUDb0+dcw9324IP9xoxD+vDhu98+LxPvT6Q78+Z5K/+i5JP+bfMj/p+Bs/l/0oPzOSFr+cfoa/G6mPPwmIFT3f1t69CY6Hv7tSlT6hhAhAvOK5P2DNf7/1yVA6z0sXQHErXj+3KQg/fyv6Pp3bjr9YvYG/+0YbPpPuBj8b9Ew/NZlhvxbugT8u5iy/GU1Gv8XzI79T8hI/nCIDP60PIz+/c36/arlYvtTLjz+o94M7pTs2PlElwr/QLsk+qeXCP+EIDD9oHA+/R1TavmlUsT9xK14/tykIP38r+j6d246/Jk8wPnM3j79Zqjw8N4mhPtblCD8tmEk/MmzpPQtY/r5VUtU+QEgMwP1L8T4xmSnAMteOv71ZoD4YdNC9RgWlPZQkOT9jLgNA9WF8PvZMFcCJKo+/ghEPPgj/XD/Ttg0+qX2Tv7cpCD9/K/o+9V9lP9aJGT9MOx++1KIHPyU+AkDyO8a/Gmnzv6KuUL44wcW/5GqsvuZbE0DO4pe+JoMJQAkeBD9GIae7dZ+PP9wHvDy9DBc+xiv7vl8kDj/AC70+SplHP2FDdD/z3GI76YASv6l9k7+3KQg/fyv6Pp3bjr+UjAVudW1weZSMBWR0eXBllJOUjAJmNJSJiIeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiSwRLHIaUjAFDlHSUUpQu"
+ },
+ "_last_episode_starts": {
+ ":type:": "<class 'numpy.ndarray'>",
+ ":serialized:": "gAWVdwAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYEAAAAAAAAAAAAAACUjAVudW1weZSMBWR0eXBllJOUjAJiMZSJiIeUUpQoSwOMAXyUTk5OSv////9K/////0sAdJRiSwSFlIwBQ5R0lFKULg=="
+ },
+ "_last_original_obs": {
+ ":type:": "<class 'numpy.ndarray'>",
+ ":serialized:": "gAWVNQIAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJbAAQAAAAAAAAAAAAAblxk2AACAPwAAAAAAAAAAAAAAAAAAAAAAAACAxsnovQAAAACfzfS/AAAAAPaqrb0AAAAAb9ztPwAAAADs/Ii8AAAAACqq3j8AAAAAegOqvQAAAADchva/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA91mptQAAgD8AAAAAAAAAAAAAAAAAAAAAAAAAgOU8w70AAAAAuP/4vwAAAAC3PAO+AAAAAImh5j8AAAAAbHX3vQAAAACiYvo/AAAAAP6Oy70AAAAAYrr4vwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFaziTYAAIA/AAAAAAAAAAAAAAAAAAAAAAAAAIAAUQa+AAAAAEtg+L8AAAAAm/eVPQAAAAAz2+g/AAAAAN3flLwAAAAAiAPkPwAAAADVUoI8AAAAAFY/278AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACg6Bc2AACAPwAAAAAAAAAAAAAAAAAAAAAAAACASfsPvgAAAAD1ENq/AAAAAO1Kbb0AAAAAIJTnPwAAAAB0tOi9AAAAAL9H8T8AAAAAVxYEvgAAAAB9ee6/AAAAAAAAAAAAAAAAAAAAAAAAAACUjAVudW1weZSMBWR0eXBllJOUjAJmNJSJiIeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiSwRLHIaUjAFDlHSUUpQu"
  },
- "_last_obs": null,
- "_last_episode_starts": null,
- "_last_original_obs": null,
  "_episode_num": 0,
  "use_sde": true,
  "sde_sample_freq": -1,
- "_current_progress_remaining": 1,
+ "_current_progress_remaining": -6.4000000000064e-05,
  "_stats_window_size": 100,
- "ep_info_buffer": null,
- "ep_success_buffer": null,
- "_n_updates": 0,
+ "ep_info_buffer": {
+ ":type:": "<class 'collections.deque'>",
+ ":serialized:": "gAWVRAwAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKUKH2UKIwBcpRHQJ07vBoEjgSMAWyUTegDjAF0lEdAm/8BIFvAGnV9lChoBkdAnknWff4yoGgHTegDaAhHQJwIl67dzn11fZQoaAZHQJ5ZQCDEm6ZoB03oA2gIR0CcDs6FdszmdX2UKGgGR0CeSnN70Fr3aAdN6ANoCEdAnBm7IT4+KXV9lChoBkdAm1iis0YTCmgHTegDaAhHQJwiAh/y5I91fZQoaAZHQJyY+Z5Rjz9oB03oA2gIR0CcKhyRSxZ/dX2UKGgGR0CdfTGHYYixaAdN6ANoCEdAnC5bKzRhMXV9lChoBkdAmecbN4Z/C2gHTegDaAhHQJw1Uaya/h51fZQoaAZHQJ5JGNR3u/loB03oA2gIR0CcPOQV9F4LdX2UKGgGR0CeEQA6dUbUaAdN6ANoCEdAnEXSVv/BFnV9lChoBkdAm3aw1aW5Y2gHTegDaAhHQJxL5wcYIjZ1fZQoaAZHQJsU4bIcR15oB03oA2gIR0CcVqmOU+s6dX2UKGgGR0CcXCCHRCyAaAdN6ANoCEdAnF/GVJL/THV9lChoBkdAnndJMQEpzGgHTegDaAhHQJxn7N7jT8Z1fZQoaAZHQJvIda8pTddoB03oA2gIR0CcbC4pc5bRdX2UKGgGR0CeoRRwqAjIaAdN6ANoCEdAnHMcjmjj73V9lChoBkdAngizbFjur2gHTegDaAhHQJx6n+OwPiF1fZQoaAZHQJrAshpxm05oB03oA2gIR0Ccgt1nM+vAdX2UKGgGR0CbE7A3kxREaAdN6ANoCEdAnIkoBBAv+XV9lChoBkdAnM8utfXws2gHTegDaAhHQJyTxwm3OOd1fZQoaAZHQJ4s77BO58VoB03oA2gIR0CcnaC6pYLcdX2UKGgGR0CcWdJ7LMcIaAdN6ANoCEdAnKWtUjs2N3V9lChoBkdAm1XBXCCSR2gHTegDaAhHQJyp+nWJ79h1fZQoaAZHQJ/ETzK9wm5oB03oA2gIR0CcsNTaTOgQdX2UKGgGR0CaJFmhM8HOaAdN6ANoCEdAnLhnTI/7i3V9lChoBkdAnMECDmKZUmgHTegDaAhHQJzAewTufEp1fZQoaAZHQKBT5donKGNoB03oA2gIR0Ccxe5Z8rqddX2UKGgGR0CgGoT8xbjcaAdN6ANoCEdAnNAoqgAZKnV9lChoBkdAnU2k4aP0ZmgHTegDaAhHQJzbATnJT2p1fZQoaAZHQJ+WkfnwG4ZoB03oA2gIR0Cc4y1WKdhBdX2UKGgGR0CeW09aEBbOaAdN6ANoCEdAnOd7UkOZs3V9lChoBkdAnwIafra/RGgHTegDaAhHQJzuWd6LOzJ1fZQoaAZHQJ7JfOryUcJoB03oA2gIR0Cc9diwB5oodX2UKGgGR0CbvYLbHp8naAdN6ANoCEdAnP36E384xXV9lChoBkdAniev5k9U0mgHTegDaAhHQJ0Cx5Qgs9V1fZQoaAZHQJ4qaUW2w3ZoB03oA2gIR0CdDLIUrTYvdX2UKGgGR0CgMA9Kujh2aAdN6ANoCEdAnRiOHWSU1XV9lChoBkdAoAriol2NemgHTegDaAhHQJ0grhCMPz51fZQoaAZHQJ7NdqHoHLRoB03oA2gIR0CdJPCKaXrudX2UKGgGR0Cgc5BppN9IaAdN6ANoCEdAnSvPPHDJl3V9lChoBkdAn25Fiay8jGgHTegDaAhHQJ0zMPhAGB51fZQoaAZHQJ4NU/MW43FoB03oA2gIR0CdOzT+NtIkdX2UKGgGR0CeqIHGjsUqaAdN6ANoCEdAnT+Bo7FKkHV9lChoBkdAntb0q+ajOGgHTegDaAhHQJ1JCR5kbxV1fZQoaAZHQJpPrPLPldVoB03oA2gIR0CdVPV0cOsldX2UKGgGR0Cb5MA93bEhaAdN6ANoCEdAnV4FGPPszHV9lChoBkdAnDM/gaWHDmgHTegDaAhHQJ1iZVfeDWd1fZQoaAZHQJT+UzGgi/xoB03oA2gIR0CdaX0oBq9HdX2UKGgGR0CcPuVMVUMoaAdN6ANoCEdAnXEZrP+n63V9lChoBkdAk1sklVtGeGgHTegDaAhHQJ15cD4gzP91fZQoaAZHQJzweYUnG85oB03oA2gIR0Cdfcby6MBIdX2UKGgGR0CbJKMF2V3VaAdN6ANoCEdAnYcFhCtzS3V9lChoBkdAmJ1rdznzQWgHTegDaAhHQJ2S8tPHktF1fZQoaAZHQI8B/bypaRpoB03oA2gIR0CdnLQ6ZH/cdX2UKGgGR0CG6WYgJTl1aAdN6ANoCEdAnaEm51/2CnV9lChoBkdAlS89yo4uLGgHTegDaAhHQJ2oWclPact1fZQoaAZHQI313wiJO35oB03oA2gIR0CdsDFWXC0odX2UKGgGR0CQaklNlAeJaAdN6ANoCEdAnbhz7l7tzHV9lChoBkdAm25XaSLZSWgHTegDaAhHQJ28qYv38Gd1fZQoaAZHQJpWUwco6S1oB03oA2gIR0CdxauaF23bdX2UKGgGR0CeMrDx9XtCaAdN6ANoCEdAndE3meUY9HV9lChoBkdAnhotzbN8mmgHTegDaAhHQJ3a4I+nqFB1fZQoaAZHQJ0tKwTufEpoB03oA2gIR0Cd3yKv3ai9dX2UKGgGR0CcGr5HVf/naAdN6ANoCEdAneX9yPuG9HV9lChoBkdAm4kf+bVjJGgHTegDaAhHQJ3totsenyd1fZQoaAZHQJqTUzKs+3ZoB03oA2gIR0Cd9dJJ5E+gdX2UKGgGR0CdBBPJJXhgaAdN6ANoCEdAnfozWXkYGnV9lChoBkdAnEohAGB4EGgHTegDaAhHQJ4CunivPkd1fZQoaAZHQJq89HbypaRoB03oA2gIR0CeDgv/io87dX2UKGgGR0CfiKWcz67/aAdN6ANoCEdAnhh3C9AX23V9lChoBkdAnODfoV2zOWgHTegDaAhHQJ4cwXsPatd1fZQoaAZHQJptKFHrhR9oB03oA2gIR0CeI6NmUW2xdX2UKGgGR0CcuxJwKjSHaAdN6ANoCEdAnis3Uc4o7XV9lChoBkdAl8p9mHxjKGgHTegDaAhHQJ4zYyO7xut1fZQoaAZHQJohiWw/xDtoB03oA2gIR0CeN6KTB68hdX2UKGgGR0Cao4Zbpu/DaAdN6ANoCEdAnj+MW0qpcXV9lChoBkdAnWn8AeaKDWgHTegDaAhHQJ5Kte5WilB1fZQoaAZHQJ5OvXRPXTVoB03oA2gIR0CeVfULUkOadX2UKGgGR0Ce6gs3AEdOaAdN6ANoCEdAnlov5DZ13nV9lChoBkdAnsr/4EfT1GgHTegDaAhHQJ5hFcSoOx11fZQoaAZHQJ+FlXtBv75oB03oA2gIR0CeaJATqSowdX2UKGgGR0CbajSfUWl/aAdN6ANoCEdAnnDlLSNOunV9lChoBkdAntwUWykbgmgHTegDaAhHQJ51PsByS3d1fZQoaAZHQJoec/LTx5NoB03oA2gIR0CefI1pj+aSdX2UKGgGR0Cf5lpZOi35aAdN6ANoCEdAnoeoy9EkSnV9lChoBkdAmvuFzuF6A2gHTegDaAhHQJ6T04//vOR1fZQoaAZHQJtoGh/RVp9oB03oA2gIR0CemBMz/IbPdX2UKGgGR0Cacf/bCaZyaAdN6ANoCEdAnp8UqH4463V9lChoBkdAmhPOpKjBVWgHTegDaAhHQJ6mqe18b711fZQoaAZHQJw80AT7EYRoB03oA2gIR0CersSyt3fRdX2UKGgGR0Cc4BmhM8HOaAdN6ANoCEdAnrMPgJkXlHV9lChoBkdAlr/X3lCCz2gHTegDaAhHQJ657PgNwzd1fZQoaAZHQJ/8dbr1M/RoB03oA2gIR0CexLGLk0aZdX2UKGgGR0Ca2tkCFK02aAdN6ANoCEdAntHDYVZcLXV9lChoBkdAnAZNK28Zk2gHTegDaAhHQJ7WEOLBKth1fZQoaAZHQJWvdKkEcKhoB03oA2gIR0Ce3QOUt7KJdX2UKGgGR0Ca//MB6rvLaAdN6ANoCEdAnuS0py6tknV9lChoBkdAmqU2JvYOD2gHTegDaAhHQJ7tIqvvBrN1fZQoaAZHQJQwoyhzvJBoB03oA2gIR0Ce8XbKifxudX2UKGgGR0CXnW9DQZ4waAdN6ANoCEdAnviDdgv12HVlLg=="
+ },
+ "ep_success_buffer": {
+ ":type:": "<class 'collections.deque'>",
+ ":serialized:": "gAWVIAAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKULg=="
+ },
+ "_n_updates": 7813,
+ "n_steps": 32,
+ "gamma": 0.995,
+ "gae_lambda": 0.9,
+ "ent_coef": 0.0,
+ "vf_coef": 0.4,
+ "max_grad_norm": 0.5,
+ "normalize_advantage": false,
  "observation_space": {
  ":type:": "<class 'gym.spaces.box.Box'>",
  ":serialized:": "gAWVZwIAAAAAAACMDmd5bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lGgFk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMBl9zaGFwZZRLHIWUjANsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWcAAAAAAAAAAAAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/lGgKSxyFlIwBQ5R0lFKUjARoaWdolGgSKJZwAAAAAAAAAAAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH+UaApLHIWUaBV0lFKUjA1ib3VuZGVkX2JlbG93lGgSKJYcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACUaAeMAmIxlImIh5RSlChLA4wBfJROTk5K/////0r/////SwB0lGJLHIWUaBV0lFKUjA1ib3VuZGVkX2Fib3ZllGgSKJYcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACUaCFLHIWUaBV0lFKUjApfbnBfcmFuZG9tlE51Yi4=",
@@ -81,12 +103,5 @@
  "bounded_above": "[ True True True True True True True True]",
  "_np_random": null
  },
- "n_envs": 4,
- "n_steps": 8,
- "gamma": 0.99,
- "gae_lambda": 0.9,
- "ent_coef": 0.0,
- "vf_coef": 0.4,
- "max_grad_norm": 0.5,
- "normalize_advantage": false
+ "n_envs": 4
  }
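Read together, the updated fields above describe the run that produced this checkpoint: 4 parallel environments, gSDE exploration, 32-step rollouts, gamma 0.995, learning rate 0.001, and roughly one million timesteps. The training script itself is not part of this commit, so the following is only a sketch of an equivalent Stable-Baselines3 1.8.0 setup; the environment construction and the VecNormalize wrapper are assumptions inferred from "n_envs": 4 and the presence of vec_normalize.pkl.

```python
# Sketch only: reconstructs the hyperparameters visible in a2c-AntBulletEnv-v0/data.
# The original training script is not included in this commit.
import pybullet_envs  # noqa: F401  (registers AntBulletEnv-v0 with gym 0.21)
from stable_baselines3 import A2C
from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3.common.vec_env import VecNormalize

# "n_envs": 4 plus the vec_normalize.pkl file suggest a normalized vectorized env (assumption).
env = make_vec_env("AntBulletEnv-v0", n_envs=4)
env = VecNormalize(env, norm_obs=True, norm_reward=True)

model = A2C(
    "MlpPolicy",
    env,
    learning_rate=1e-3,         # "learning_rate": 0.001
    n_steps=32,                 # "n_steps": 32
    gamma=0.995,                # "gamma": 0.995
    gae_lambda=0.9,             # "gae_lambda": 0.9
    ent_coef=0.0,               # "ent_coef": 0.0
    vf_coef=0.4,                # "vf_coef": 0.4
    max_grad_norm=0.5,          # "max_grad_norm": 0.5
    use_sde=True,               # "use_sde": true
    normalize_advantage=False,  # "normalize_advantage": false
    policy_kwargs=dict(log_std_init=-2, ortho_init=False),  # from "policy_kwargs"
    verbose=1,
)
model.learn(total_timesteps=1_000_000)  # "_total_timesteps": 1000000
model.save("a2c-AntBulletEnv-v0")
env.save("vec_normalize.pkl")
```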
a2c-AntBulletEnv-v0/policy.optimizer.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:763e6aa37fb83f8a1ec0724e39a562418a7d4e18293f64bc65670e025bf41c6e
- size 687
+ oid sha256:2ecf10d56dd97272228529235c2acfd16f69657d877b1bcbedef010a717b8120
+ size 56190
a2c-AntBulletEnv-v0/policy.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:adae68f4c96d2af4ca3f25476232eb4447592c498f5292718c6faad54aeb6bd8
+ oid sha256:d49083f823eb11ce5d2a1959bebcf541b01ec37d92546afc61b13da1f0e6b533
  size 56894
config.json CHANGED
@@ -1 +1 @@
- {"policy_class": {":type:": "<class 'abc.ABCMeta'>", ":serialized:": "gAWVOwAAAAAAAACMIXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5wb2xpY2llc5SMEUFjdG9yQ3JpdGljUG9saWN5lJOULg==", "__module__": "stable_baselines3.common.policies", "__doc__": "\n Policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ", "__init__": "<function ActorCriticPolicy.__init__ at 0x7f124b6b04c0>", "_get_constructor_parameters": "<function ActorCriticPolicy._get_constructor_parameters at 0x7f124b6b0550>", "reset_noise": "<function ActorCriticPolicy.reset_noise at 0x7f124b6b05e0>", "_build_mlp_extractor": "<function ActorCriticPolicy._build_mlp_extractor at 0x7f124b6b0670>", "_build": "<function ActorCriticPolicy._build at 0x7f124b6b0700>", "forward": "<function ActorCriticPolicy.forward at 0x7f124b6b0790>", "extract_features": "<function ActorCriticPolicy.extract_features at 0x7f124b6b0820>", "_get_action_dist_from_latent": "<function ActorCriticPolicy._get_action_dist_from_latent at 0x7f124b6b08b0>", "_predict": "<function ActorCriticPolicy._predict at 0x7f124b6b0940>", "evaluate_actions": "<function ActorCriticPolicy.evaluate_actions at 0x7f124b6b09d0>", "get_distribution": "<function ActorCriticPolicy.get_distribution at 0x7f124b6b0a60>", "predict_values": "<function ActorCriticPolicy.predict_values at 0x7f124b6b0af0>", "__abstractmethods__": "frozenset()", "_abc_impl": "<_abc._abc_data object at 0x7f124b634800>"}, "verbose": 1, "policy_kwargs": {":type:": "<class 'dict'>", ":serialized:": "gAWVowAAAAAAAAB9lCiMDGxvZ19zdGRfaW5pdJRK/v///4wKb3J0aG9faW5pdJSJjA9vcHRpbWl6ZXJfY2xhc3OUjBN0b3JjaC5vcHRpbS5ybXNwcm9wlIwHUk1TcHJvcJSTlIwQb3B0aW1pemVyX2t3YXJnc5R9lCiMBWFscGhhlEc/764UeuFHrowDZXBzlEc+5Pi1iONo8YwMd2VpZ2h0X2RlY2F5lEsAdXUu", "log_std_init": -2, "ortho_init": false, "optimizer_class": "<class 'torch.optim.rmsprop.RMSprop'>", "optimizer_kwargs": {"alpha": 0.99, "eps": 1e-05, "weight_decay": 0}}, "num_timesteps": 0, "_total_timesteps": 0, "_num_timesteps_at_start": 0, 
"seed": null, "action_noise": null, "start_time": null, "learning_rate": 0.00096, "tensorboard_log": null, "lr_schedule": {":type:": "<class 'function'>", ":serialized:": "gAWVwwIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMSC91c3IvbG9jYWwvbGliL3B5dGhvbjMuOS9kaXN0LXBhY2thZ2VzL3N0YWJsZV9iYXNlbGluZXMzL2NvbW1vbi91dGlscy5weZSMBGZ1bmOUS4JDAgABlIwDdmFslIWUKXSUUpR9lCiMC19fcGFja2FnZV9flIwYc3RhYmxlX2Jhc2VsaW5lczMuY29tbW9ulIwIX19uYW1lX1+UjB5zdGFibGVfYmFzZWxpbmVzMy5jb21tb24udXRpbHOUjAhfX2ZpbGVfX5SMSC91c3IvbG9jYWwvbGliL3B5dGhvbjMuOS9kaXN0LXBhY2thZ2VzL3N0YWJsZV9iYXNlbGluZXMzL2NvbW1vbi91dGlscy5weZR1Tk5oAIwQX21ha2VfZW1wdHlfY2VsbJSTlClSlIWUdJRSlIwcY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEl9mdW5jdGlvbl9zZXRzdGF0ZZSTlGgffZR9lChoFmgNjAxfX3F1YWxuYW1lX1+UjBljb25zdGFudF9mbi48bG9jYWxzPi5mdW5jlIwPX19hbm5vdGF0aW9uc19flH2UjA5fX2t3ZGVmYXVsdHNfX5ROjAxfX2RlZmF1bHRzX1+UTowKX19tb2R1bGVfX5RoF4wHX19kb2NfX5ROjAtfX2Nsb3N1cmVfX5RoAIwKX21ha2VfY2VsbJSTlEc/T3UQTVUdaYWUUpSFlIwXX2Nsb3VkcGlja2xlX3N1Ym1vZHVsZXOUXZSMC19fZ2xvYmFsc19flH2UdYaUhlIwLg=="}, "_last_obs": null, "_last_episode_starts": null, "_last_original_obs": null, "_episode_num": 0, "use_sde": true, "sde_sample_freq": -1, "_current_progress_remaining": 1, "_stats_window_size": 100, "ep_info_buffer": null, "ep_success_buffer": null, "_n_updates": 0, "observation_space": {":type:": "<class 'gym.spaces.box.Box'>", ":serialized:": "gAWVZwIAAAAAAACMDmd5bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lGgFk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMBl9zaGFwZZRLHIWUjANsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWcAAAAAAAAAAAAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/lGgKSxyFlIwBQ5R0lFKUjARoaWdolGgSKJZwAAAAAAAAAAAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH+UaApLHIWUaBV0lFKUjA1ib3VuZGVkX2JlbG93lGgSKJYcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACUaAeMAmIxlImIh5RSlChLA4wBfJROTk5K/////0r/////SwB0lGJLHIWUaBV0lFKUjA1ib3VuZGVkX2Fib3ZllGgSKJYcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACUaCFLHIWUaBV0lFKUjApfbnBfcmFuZG9tlE51Yi4=", "dtype": "float32", "_shape": [28], "low": "[-inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf\n -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf]", "high": "[inf inf inf inf inf inf inf inf inf inf inf inf inf inf inf inf inf inf\n inf inf inf inf inf inf inf inf inf inf]", "bounded_below": "[False False False False False False False False False False False False\n False False False False False False False False False False False False\n False False False False]", "bounded_above": "[False False False False False False False False False False False False\n False False False False False False False False False False False False\n False False False False]", "_np_random": null}, "action_space": {":type:": "<class 'gym.spaces.box.Box'>", ":serialized:": 
"gAWVnwEAAAAAAACMDmd5bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lGgFk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMBl9zaGFwZZRLCIWUjANsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWIAAAAAAAAAAAAIC/AACAvwAAgL8AAIC/AACAvwAAgL8AAIC/AACAv5RoCksIhZSMAUOUdJRSlIwEaGlnaJRoEiiWIAAAAAAAAAAAAIA/AACAPwAAgD8AAIA/AACAPwAAgD8AAIA/AACAP5RoCksIhZRoFXSUUpSMDWJvdW5kZWRfYmVsb3eUaBIolggAAAAAAAAAAQEBAQEBAQGUaAeMAmIxlImIh5RSlChLA4wBfJROTk5K/////0r/////SwB0lGJLCIWUaBV0lFKUjA1ib3VuZGVkX2Fib3ZllGgSKJYIAAAAAAAAAAEBAQEBAQEBlGghSwiFlGgVdJRSlIwKX25wX3JhbmRvbZROdWIu", "dtype": "float32", "_shape": [8], "low": "[-1. -1. -1. -1. -1. -1. -1. -1.]", "high": "[1. 1. 1. 1. 1. 1. 1. 1.]", "bounded_below": "[ True True True True True True True True]", "bounded_above": "[ True True True True True True True True]", "_np_random": null}, "n_envs": 4, "n_steps": 8, "gamma": 0.99, "gae_lambda": 0.9, "ent_coef": 0.0, "vf_coef": 0.4, "max_grad_norm": 0.5, "normalize_advantage": false, "system_info": {"OS": "Linux-5.10.147+-x86_64-with-glibc2.31 # 1 SMP Sat Dec 10 16:00:40 UTC 2022", "Python": "3.9.16", "Stable-Baselines3": "1.8.0", "PyTorch": "2.0.0+cu118", "GPU Enabled": "True", "Numpy": "1.22.4", "Gym": "0.21.0"}}
+ {"policy_class": {":type:": "<class 'abc.ABCMeta'>", ":serialized:": "gAWVOwAAAAAAAACMIXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5wb2xpY2llc5SMEUFjdG9yQ3JpdGljUG9saWN5lJOULg==", "__module__": "stable_baselines3.common.policies", "__doc__": "\n Policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ", "__init__": "<function ActorCriticPolicy.__init__ at 0x7fe4afd50040>", "_get_constructor_parameters": "<function ActorCriticPolicy._get_constructor_parameters at 0x7fe4afd500d0>", "reset_noise": "<function ActorCriticPolicy.reset_noise at 0x7fe4afd50160>", "_build_mlp_extractor": "<function ActorCriticPolicy._build_mlp_extractor at 0x7fe4afd501f0>", "_build": "<function ActorCriticPolicy._build at 0x7fe4afd50280>", "forward": "<function ActorCriticPolicy.forward at 0x7fe4afd50310>", "extract_features": "<function ActorCriticPolicy.extract_features at 0x7fe4afd503a0>", "_get_action_dist_from_latent": "<function ActorCriticPolicy._get_action_dist_from_latent at 0x7fe4afd50430>", "_predict": "<function ActorCriticPolicy._predict at 0x7fe4afd504c0>", "evaluate_actions": "<function ActorCriticPolicy.evaluate_actions at 0x7fe4afd50550>", "get_distribution": "<function ActorCriticPolicy.get_distribution at 0x7fe4afd505e0>", "predict_values": "<function ActorCriticPolicy.predict_values at 0x7fe4afd50670>", "__abstractmethods__": "frozenset()", "_abc_impl": "<_abc._abc_data object at 0x7fe4afd4ff80>"}, "verbose": 1, "policy_kwargs": {":type:": "<class 'dict'>", ":serialized:": "gAWVowAAAAAAAAB9lCiMDGxvZ19zdGRfaW5pdJRK/v///4wKb3J0aG9faW5pdJSJjA9vcHRpbWl6ZXJfY2xhc3OUjBN0b3JjaC5vcHRpbS5ybXNwcm9wlIwHUk1TcHJvcJSTlIwQb3B0aW1pemVyX2t3YXJnc5R9lCiMBWFscGhhlEc/764UeuFHrowDZXBzlEc+5Pi1iONo8YwMd2VpZ2h0X2RlY2F5lEsAdXUu", "log_std_init": -2, "ortho_init": false, "optimizer_class": "<class 'torch.optim.rmsprop.RMSprop'>", "optimizer_kwargs": {"alpha": 0.99, "eps": 1e-05, "weight_decay": 0}}, "num_timesteps": 1000064, "_total_timesteps": 1000000, 
"_num_timesteps_at_start": 0, "seed": null, "action_noise": null, "start_time": 1681378097059957234, "learning_rate": 0.001, "tensorboard_log": null, "lr_schedule": {":type:": "<class 'function'>", ":serialized:": "gAWVwwIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMSC91c3IvbG9jYWwvbGliL3B5dGhvbjMuOS9kaXN0LXBhY2thZ2VzL3N0YWJsZV9iYXNlbGluZXMzL2NvbW1vbi91dGlscy5weZSMBGZ1bmOUS4JDAgABlIwDdmFslIWUKXSUUpR9lCiMC19fcGFja2FnZV9flIwYc3RhYmxlX2Jhc2VsaW5lczMuY29tbW9ulIwIX19uYW1lX1+UjB5zdGFibGVfYmFzZWxpbmVzMy5jb21tb24udXRpbHOUjAhfX2ZpbGVfX5SMSC91c3IvbG9jYWwvbGliL3B5dGhvbjMuOS9kaXN0LXBhY2thZ2VzL3N0YWJsZV9iYXNlbGluZXMzL2NvbW1vbi91dGlscy5weZR1Tk5oAIwQX21ha2VfZW1wdHlfY2VsbJSTlClSlIWUdJRSlIwcY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEl9mdW5jdGlvbl9zZXRzdGF0ZZSTlGgffZR9lChoFmgNjAxfX3F1YWxuYW1lX1+UjBljb25zdGFudF9mbi48bG9jYWxzPi5mdW5jlIwPX19hbm5vdGF0aW9uc19flH2UjA5fX2t3ZGVmYXVsdHNfX5ROjAxfX2RlZmF1bHRzX1+UTowKX19tb2R1bGVfX5RoF4wHX19kb2NfX5ROjAtfX2Nsb3N1cmVfX5RoAIwKX21ha2VfY2VsbJSTlEc/UGJN0vGp/IWUUpSFlIwXX2Nsb3VkcGlja2xlX3N1Ym1vZHVsZXOUXZSMC19fZ2xvYmFsc19flH2UdYaUhlIwLg=="}, "_last_obs": {":type:": "<class 'numpy.ndarray'>", ":serialized:": "gAWVNQIAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJbAAQAAAAAAADmUDb0+dcw9324IP9xoxD+vDhu98+LxPvT6Q78+Z5K/+i5JP+bfMj/p+Bs/l/0oPzOSFr+cfoa/G6mPPwmIFT3f1t69CY6Hv7tSlT6hhAhAvOK5P2DNf7/1yVA6z0sXQHErXj+3KQg/fyv6Pp3bjr9YvYG/+0YbPpPuBj8b9Ew/NZlhvxbugT8u5iy/GU1Gv8XzI79T8hI/nCIDP60PIz+/c36/arlYvtTLjz+o94M7pTs2PlElwr/QLsk+qeXCP+EIDD9oHA+/R1TavmlUsT9xK14/tykIP38r+j6d246/Jk8wPnM3j79Zqjw8N4mhPtblCD8tmEk/MmzpPQtY/r5VUtU+QEgMwP1L8T4xmSnAMteOv71ZoD4YdNC9RgWlPZQkOT9jLgNA9WF8PvZMFcCJKo+/ghEPPgj/XD/Ttg0+qX2Tv7cpCD9/K/o+9V9lP9aJGT9MOx++1KIHPyU+AkDyO8a/Gmnzv6KuUL44wcW/5GqsvuZbE0DO4pe+JoMJQAkeBD9GIae7dZ+PP9wHvDy9DBc+xiv7vl8kDj/AC70+SplHP2FDdD/z3GI76YASv6l9k7+3KQg/fyv6Pp3bjr+UjAVudW1weZSMBWR0eXBllJOUjAJmNJSJiIeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiSwRLHIaUjAFDlHSUUpQu"}, "_last_episode_starts": {":type:": "<class 'numpy.ndarray'>", ":serialized:": "gAWVdwAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYEAAAAAAAAAAAAAACUjAVudW1weZSMBWR0eXBllJOUjAJiMZSJiIeUUpQoSwOMAXyUTk5OSv////9K/////0sAdJRiSwSFlIwBQ5R0lFKULg=="}, "_last_original_obs": {":type:": "<class 'numpy.ndarray'>", ":serialized:": "gAWVNQIAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJbAAQAAAAAAAAAAAAAblxk2AACAPwAAAAAAAAAAAAAAAAAAAAAAAACAxsnovQAAAACfzfS/AAAAAPaqrb0AAAAAb9ztPwAAAADs/Ii8AAAAACqq3j8AAAAAegOqvQAAAADchva/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA91mptQAAgD8AAAAAAAAAAAAAAAAAAAAAAAAAgOU8w70AAAAAuP/4vwAAAAC3PAO+AAAAAImh5j8AAAAAbHX3vQAAAACiYvo/AAAAAP6Oy70AAAAAYrr4vwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFaziTYAAIA/AAAAAAAAAAAAAAAAAAAAAAAAAIAAUQa+AAAAAEtg+L8AAAAAm/eVPQAAAAAz2+g/AAAAAN3flLwAAAAAiAPkPwAAAADVUoI8AAAAAFY/278AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACg6Bc2AACAPwAAAAAAAAAAAAAAAAAAAAAAAACASfsPvgAAAAD1ENq/AAAAAO1Kbb0AAAAAIJTnPwAAAAB0tOi9AAAAAL9H8T8AAAAAVxYEvgAAAAB9ee6/AAAAAAAAAAAAAAAAAAAAAAAAAACUjAVudW1weZSMBWR0eXBllJOUjAJmNJSJiIeUUpQoSwOMATyUTk5OSv////9K/////0sAdJRiSwRLHIaUjAFDlHSUUpQu"}, "_episode_num": 0, "use_sde": true, "sde_sample_freq": -1, "_current_progress_remaining": -6.4000000000064e-05, "_stats_window_size": 100, "ep_info_buffer": {":type:": "<class 'collections.deque'>", ":serialized:": 
"gAWVRAwAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKUKH2UKIwBcpRHQJ07vBoEjgSMAWyUTegDjAF0lEdAm/8BIFvAGnV9lChoBkdAnknWff4yoGgHTegDaAhHQJwIl67dzn11fZQoaAZHQJ5ZQCDEm6ZoB03oA2gIR0CcDs6FdszmdX2UKGgGR0CeSnN70Fr3aAdN6ANoCEdAnBm7IT4+KXV9lChoBkdAm1iis0YTCmgHTegDaAhHQJwiAh/y5I91fZQoaAZHQJyY+Z5Rjz9oB03oA2gIR0CcKhyRSxZ/dX2UKGgGR0CdfTGHYYixaAdN6ANoCEdAnC5bKzRhMXV9lChoBkdAmecbN4Z/C2gHTegDaAhHQJw1Uaya/h51fZQoaAZHQJ5JGNR3u/loB03oA2gIR0CcPOQV9F4LdX2UKGgGR0CeEQA6dUbUaAdN6ANoCEdAnEXSVv/BFnV9lChoBkdAm3aw1aW5Y2gHTegDaAhHQJxL5wcYIjZ1fZQoaAZHQJsU4bIcR15oB03oA2gIR0CcVqmOU+s6dX2UKGgGR0CcXCCHRCyAaAdN6ANoCEdAnF/GVJL/THV9lChoBkdAnndJMQEpzGgHTegDaAhHQJxn7N7jT8Z1fZQoaAZHQJvIda8pTddoB03oA2gIR0CcbC4pc5bRdX2UKGgGR0CeoRRwqAjIaAdN6ANoCEdAnHMcjmjj73V9lChoBkdAngizbFjur2gHTegDaAhHQJx6n+OwPiF1fZQoaAZHQJrAshpxm05oB03oA2gIR0Ccgt1nM+vAdX2UKGgGR0CbE7A3kxREaAdN6ANoCEdAnIkoBBAv+XV9lChoBkdAnM8utfXws2gHTegDaAhHQJyTxwm3OOd1fZQoaAZHQJ4s77BO58VoB03oA2gIR0CcnaC6pYLcdX2UKGgGR0CcWdJ7LMcIaAdN6ANoCEdAnKWtUjs2N3V9lChoBkdAm1XBXCCSR2gHTegDaAhHQJyp+nWJ79h1fZQoaAZHQJ/ETzK9wm5oB03oA2gIR0CcsNTaTOgQdX2UKGgGR0CaJFmhM8HOaAdN6ANoCEdAnLhnTI/7i3V9lChoBkdAnMECDmKZUmgHTegDaAhHQJzAewTufEp1fZQoaAZHQKBT5donKGNoB03oA2gIR0Ccxe5Z8rqddX2UKGgGR0CgGoT8xbjcaAdN6ANoCEdAnNAoqgAZKnV9lChoBkdAnU2k4aP0ZmgHTegDaAhHQJzbATnJT2p1fZQoaAZHQJ+WkfnwG4ZoB03oA2gIR0Cc4y1WKdhBdX2UKGgGR0CeW09aEBbOaAdN6ANoCEdAnOd7UkOZs3V9lChoBkdAnwIafra/RGgHTegDaAhHQJzuWd6LOzJ1fZQoaAZHQJ7JfOryUcJoB03oA2gIR0Cc9diwB5oodX2UKGgGR0CbvYLbHp8naAdN6ANoCEdAnP36E384xXV9lChoBkdAniev5k9U0mgHTegDaAhHQJ0Cx5Qgs9V1fZQoaAZHQJ4qaUW2w3ZoB03oA2gIR0CdDLIUrTYvdX2UKGgGR0CgMA9Kujh2aAdN6ANoCEdAnRiOHWSU1XV9lChoBkdAoAriol2NemgHTegDaAhHQJ0grhCMPz51fZQoaAZHQJ7NdqHoHLRoB03oA2gIR0CdJPCKaXrudX2UKGgGR0Cgc5BppN9IaAdN6ANoCEdAnSvPPHDJl3V9lChoBkdAn25Fiay8jGgHTegDaAhHQJ0zMPhAGB51fZQoaAZHQJ4NU/MW43FoB03oA2gIR0CdOzT+NtIkdX2UKGgGR0CeqIHGjsUqaAdN6ANoCEdAnT+Bo7FKkHV9lChoBkdAntb0q+ajOGgHTegDaAhHQJ1JCR5kbxV1fZQoaAZHQJpPrPLPldVoB03oA2gIR0CdVPV0cOsldX2UKGgGR0Cb5MA93bEhaAdN6ANoCEdAnV4FGPPszHV9lChoBkdAnDM/gaWHDmgHTegDaAhHQJ1iZVfeDWd1fZQoaAZHQJT+UzGgi/xoB03oA2gIR0CdaX0oBq9HdX2UKGgGR0CcPuVMVUMoaAdN6ANoCEdAnXEZrP+n63V9lChoBkdAk1sklVtGeGgHTegDaAhHQJ15cD4gzP91fZQoaAZHQJzweYUnG85oB03oA2gIR0Cdfcby6MBIdX2UKGgGR0CbJKMF2V3VaAdN6ANoCEdAnYcFhCtzS3V9lChoBkdAmJ1rdznzQWgHTegDaAhHQJ2S8tPHktF1fZQoaAZHQI8B/bypaRpoB03oA2gIR0CdnLQ6ZH/cdX2UKGgGR0CG6WYgJTl1aAdN6ANoCEdAnaEm51/2CnV9lChoBkdAlS89yo4uLGgHTegDaAhHQJ2oWclPact1fZQoaAZHQI313wiJO35oB03oA2gIR0CdsDFWXC0odX2UKGgGR0CQaklNlAeJaAdN6ANoCEdAnbhz7l7tzHV9lChoBkdAm25XaSLZSWgHTegDaAhHQJ28qYv38Gd1fZQoaAZHQJpWUwco6S1oB03oA2gIR0CdxauaF23bdX2UKGgGR0CeMrDx9XtCaAdN6ANoCEdAndE3meUY9HV9lChoBkdAnhotzbN8mmgHTegDaAhHQJ3a4I+nqFB1fZQoaAZHQJ0tKwTufEpoB03oA2gIR0Cd3yKv3ai9dX2UKGgGR0CcGr5HVf/naAdN6ANoCEdAneX9yPuG9HV9lChoBkdAm4kf+bVjJGgHTegDaAhHQJ3totsenyd1fZQoaAZHQJqTUzKs+3ZoB03oA2gIR0Cd9dJJ5E+gdX2UKGgGR0CdBBPJJXhgaAdN6ANoCEdAnfozWXkYGnV9lChoBkdAnEohAGB4EGgHTegDaAhHQJ4CunivPkd1fZQoaAZHQJq89HbypaRoB03oA2gIR0CeDgv/io87dX2UKGgGR0CfiKWcz67/aAdN6ANoCEdAnhh3C9AX23V9lChoBkdAnODfoV2zOWgHTegDaAhHQJ4cwXsPatd1fZQoaAZHQJptKFHrhR9oB03oA2gIR0CeI6NmUW2xdX2UKGgGR0CcuxJwKjSHaAdN6ANoCEdAnis3Uc4o7XV9lChoBkdAl8p9mHxjKGgHTegDaAhHQJ4zYyO7xut1fZQoaAZHQJohiWw/xDtoB03oA2gIR0CeN6KTB68hdX2UKGgGR0Cao4Zbpu/DaAdN6ANoCEdAnj+MW0qpcXV9lChoBkdAnWn8AeaKDWgHTegDaAhHQJ5Kte5WilB1fZQoaAZHQJ5OvXRPXTVoB03oA2gIR0CeVfULUkOadX2UKGgGR0Ce6gs3AEdOaAdN6ANoCEdAnlov5DZ13nV9lChoBkdAnsr/4EfT1GgHTegDaAhHQJ5hFcSoOx11fZQoaAZHQJ+FlXtBv75oB03oA2gIR0CeaJATqSowdX2UKGgGR0CbajSfUWl/aAdN6ANoCEdAnnDlLSNOunV9lChoBkdAntwUWykbgmgHTegDaAhHQJ51PsByS3d1fZQoaAZHQJoec/LTx5NoB03oA2gIR0CefI1pj+aSdX2UKGgGR0Cf5lpZOi
35aAdN6ANoCEdAnoeoy9EkSnV9lChoBkdAmvuFzuF6A2gHTegDaAhHQJ6T04//vOR1fZQoaAZHQJtoGh/RVp9oB03oA2gIR0CemBMz/IbPdX2UKGgGR0Cacf/bCaZyaAdN6ANoCEdAnp8UqH4463V9lChoBkdAmhPOpKjBVWgHTegDaAhHQJ6mqe18b711fZQoaAZHQJw80AT7EYRoB03oA2gIR0CersSyt3fRdX2UKGgGR0Cc4BmhM8HOaAdN6ANoCEdAnrMPgJkXlHV9lChoBkdAlr/X3lCCz2gHTegDaAhHQJ657PgNwzd1fZQoaAZHQJ/8dbr1M/RoB03oA2gIR0CexLGLk0aZdX2UKGgGR0Ca2tkCFK02aAdN6ANoCEdAntHDYVZcLXV9lChoBkdAnAZNK28Zk2gHTegDaAhHQJ7WEOLBKth1fZQoaAZHQJWvdKkEcKhoB03oA2gIR0Ce3QOUt7KJdX2UKGgGR0Ca//MB6rvLaAdN6ANoCEdAnuS0py6tknV9lChoBkdAmqU2JvYOD2gHTegDaAhHQJ7tIqvvBrN1fZQoaAZHQJQwoyhzvJBoB03oA2gIR0Ce8XbKifxudX2UKGgGR0CXnW9DQZ4waAdN6ANoCEdAnviDdgv12HVlLg=="}, "ep_success_buffer": {":type:": "<class 'collections.deque'>", ":serialized:": "gAWVIAAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKULg=="}, "_n_updates": 7813, "n_steps": 32, "gamma": 0.995, "gae_lambda": 0.9, "ent_coef": 0.0, "vf_coef": 0.4, "max_grad_norm": 0.5, "normalize_advantage": false, "observation_space": {":type:": "<class 'gym.spaces.box.Box'>", ":serialized:": "gAWVZwIAAAAAAACMDmd5bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lGgFk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMBl9zaGFwZZRLHIWUjANsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWcAAAAAAAAAAAAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/lGgKSxyFlIwBQ5R0lFKUjARoaWdolGgSKJZwAAAAAAAAAAAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAfwAAgH+UaApLHIWUaBV0lFKUjA1ib3VuZGVkX2JlbG93lGgSKJYcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACUaAeMAmIxlImIh5RSlChLA4wBfJROTk5K/////0r/////SwB0lGJLHIWUaBV0lFKUjA1ib3VuZGVkX2Fib3ZllGgSKJYcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACUaCFLHIWUaBV0lFKUjApfbnBfcmFuZG9tlE51Yi4=", "dtype": "float32", "_shape": [28], "low": "[-inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf\n -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf -inf]", "high": "[inf inf inf inf inf inf inf inf inf inf inf inf inf inf inf inf inf inf\n inf inf inf inf inf inf inf inf inf inf]", "bounded_below": "[False False False False False False False False False False False False\n False False False False False False False False False False False False\n False False False False]", "bounded_above": "[False False False False False False False False False False False False\n False False False False False False False False False False False False\n False False False False]", "_np_random": null}, "action_space": {":type:": "<class 'gym.spaces.box.Box'>", ":serialized:": "gAWVnwEAAAAAAACMDmd5bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lGgFk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMBl9zaGFwZZRLCIWUjANsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWIAAAAAAAAAAAAIC/AACAvwAAgL8AAIC/AACAvwAAgL8AAIC/AACAv5RoCksIhZSMAUOUdJRSlIwEaGlnaJRoEiiWIAAAAAAAAAAAAIA/AACAPwAAgD8AAIA/AACAPwAAgD8AAIA/AACAP5RoCksIhZRoFXSUUpSMDWJvdW5kZWRfYmVsb3eUaBIolggAAAAAAAAAAQEBAQEBAQGUaAeMAmIxlImIh5RSlChLA4wBfJROTk5K/////0r/////SwB0lGJLCIWUaBV0lFKUjA1ib3VuZGVkX2Fib3ZllGgSKJYIAAAAAAAAAAEBAQEBAQEBlGghSwiFlGgVdJRSlIwKX25wX3JhbmRvbZROdWIu", "dtype": "float32", "_shape": [8], "low": "[-1. -1. -1. -1. -1. -1. -1. -1.]", "high": "[1. 1. 1. 1. 1. 1. 1. 
1.]", "bounded_below": "[ True True True True True True True True]", "bounded_above": "[ True True True True True True True True]", "_np_random": null}, "n_envs": 4, "system_info": {"OS": "Linux-5.10.147+-x86_64-with-glibc2.31 # 1 SMP Sat Dec 10 16:00:40 UTC 2022", "Python": "3.9.16", "Stable-Baselines3": "1.8.0", "PyTorch": "2.0.0+cu118", "GPU Enabled": "True", "Numpy": "1.22.4", "Gym": "0.21.0"}}
replay.mp4 CHANGED
Binary files a/replay.mp4 and b/replay.mp4 differ
 
results.json CHANGED
@@ -1 +1 @@
- {"mean_reward": 36.602076078951356, "std_reward": 32.03419773012497, "is_deterministic": true, "n_eval_episodes": 10, "eval_datetime": "2023-04-12T09:11:45.514883"}
+ {"mean_reward": 2043.587832799938, "std_reward": 93.93421232846408, "is_deterministic": true, "n_eval_episodes": 10, "eval_datetime": "2023-04-13T10:15:25.183380"}
vec_normalize.pkl CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:69353aebf128ae93a38070501555cf81da6f7bb2965dc7b9d7d52ee96bc9fdc1
+ oid sha256:a5e2976b4da547daedad44c83137db83c349997e3d42f09a63addbb784a5ce00
  size 2170