ernestum committed
Commit 72d3b3f · 1 Parent(s): 007320e

Initial commit

README.md CHANGED
@@ -77,3 +77,8 @@ OrderedDict([('batch_size', 128),
   ('train_freq', 256),
   ('normalize', False)])
   ```
+
+ # Environment Arguments
+ ```python
+ {'render_mode': 'rgb_array'}
+ ```
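
The new "Environment Arguments" section records the kwargs the environment was created with. Below is a minimal sketch of applying them when recreating the environment; the ID `seals/Swimmer-v1` and the fact that importing `seals` registers it are assumptions (the exact ID can differ between seals releases):

```python
import gymnasium as gym
import seals  # noqa: F401  # assumption: importing seals registers the seals/* environments

# Recreate the environment with the recorded kwargs so that env.render()
# returns RGB frames (this is what makes recording replay.mp4 possible).
env_kwargs = {"render_mode": "rgb_array"}
env = gym.make("seals/Swimmer-v1", **env_kwargs)  # env ID is an assumption

obs, info = env.reset()
frame = env.render()  # numpy array of shape (H, W, 3) in rgb_array mode
print(frame.shape)
```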
env_kwargs.yml CHANGED
@@ -1 +1 @@
- {}
+ render_mode: rgb_array
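
env_kwargs.yml now records the render mode instead of an empty mapping. A short sketch of reading it back and forwarding the entries to the environment constructor; beyond the diff, the use of `yaml.safe_load` and the environment ID are assumptions:

```python
import gymnasium as gym
import yaml
import seals  # noqa: F401  # assumption: registers seals/Swimmer-v1

with open("env_kwargs.yml") as f:
    env_kwargs = yaml.safe_load(f) or {}  # guard against an empty file

env = gym.make("seals/Swimmer-v1", **env_kwargs)  # env ID is an assumption
```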
replay.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ae31f2f40c11cde1dbcfdd79b2b17ca41521be424e26584eb281317f745a1d16
+ size 435245
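
replay.mp4 is stored via Git LFS, so the repository only carries a pointer with the object's sha256 and size. A standard-library sketch for checking a downloaded file against such a pointer; the values below are taken from the pointer in this diff:

```python
import hashlib
from pathlib import Path

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Check a downloaded file against the sha256/size from its Git LFS pointer."""
    data = Path(path).read_bytes()
    return hashlib.sha256(data).hexdigest() == expected_oid and len(data) == expected_size

print(verify_lfs_object(
    "replay.mp4",
    expected_oid="ae31f2f40c11cde1dbcfdd79b2b17ca41521be424e26584eb281317f745a1d16",
    expected_size=435245,
))
```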
results.json CHANGED
@@ -1 +1 @@
- {"mean_reward": 28.9023299, "std_reward": 1.6711585563886182, "is_deterministic": true, "n_eval_episodes": 10, "eval_datetime": "2023-09-15T13:55:41.749317"}
+ {"mean_reward": 28.9023299, "std_reward": 1.6711585563886182, "is_deterministic": true, "n_eval_episodes": 10, "eval_datetime": "2023-09-18T09:55:50.831805"}
sac-seals-Swimmer-v1.zip CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8c995510e331e9b3b1660f1e4df2df5e89512b21af0c698b852460bceb9d07e6
- size 5580789
+ oid sha256:8a1f4409a2d8c327f0b51913d10353beadf6d5f856451d53c43eeb26c67fb8e1
+ size 5580793
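
The retrained checkpoint replaces the previous one (only the LFS oid and size change). A minimal sketch for loading the zip with Stable-Baselines3 and running a single greedy rollout; as above, the environment ID is an assumption:

```python
import gymnasium as gym
import seals  # noqa: F401  # assumption: registers seals/Swimmer-v1
from stable_baselines3 import SAC

env = gym.make("seals/Swimmer-v1")  # env ID is an assumption
model = SAC.load("sac-seals-Swimmer-v1.zip", env=env)

obs, info = env.reset()
total_reward = 0.0
done = False
while not done:
    # Deterministic (greedy) actions, as used for the evaluation in results.json.
    action, _state = model.predict(obs, deterministic=True)
    obs, reward, terminated, truncated, info = env.step(action)
    total_reward += float(reward)
    done = terminated or truncated
print(f"episode return: {total_reward:.2f}")
```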
sac-seals-Swimmer-v1/_stable_baselines3_version CHANGED
@@ -1 +1 @@
- 2.1.0
+ 2.2.0a3
sac-seals-Swimmer-v1/data CHANGED
@@ -5,17 +5,17 @@
  "__module__": "stable_baselines3.sac.policies",
  "__annotations__": "{'actor': <class 'stable_baselines3.sac.policies.Actor'>, 'critic': <class 'stable_baselines3.common.policies.ContinuousCritic'>, 'critic_target': <class 'stable_baselines3.common.policies.ContinuousCritic'>}",
  "__doc__": "\n Policy class (with both actor and critic) for SAC.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param use_expln: Use ``expln()`` function instead of ``exp()`` when using gSDE to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param clip_mean: Clip the mean output when using gSDE to avoid numerical instability.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n :param n_critics: Number of critic networks to create.\n :param share_features_extractor: Whether to share or not the features extractor\n between the actor and the critic (this saves computation time)\n ",
- "__init__": "<function SACPolicy.__init__ at 0x7f739ad838b0>",
- "_build": "<function SACPolicy._build at 0x7f739ad83940>",
- "_get_constructor_parameters": "<function SACPolicy._get_constructor_parameters at 0x7f739ad839d0>",
- "reset_noise": "<function SACPolicy.reset_noise at 0x7f739ad83a60>",
- "make_actor": "<function SACPolicy.make_actor at 0x7f739ad83af0>",
- "make_critic": "<function SACPolicy.make_critic at 0x7f739ad83b80>",
- "forward": "<function SACPolicy.forward at 0x7f739ad83c10>",
- "_predict": "<function SACPolicy._predict at 0x7f739ad83ca0>",
- "set_training_mode": "<function SACPolicy.set_training_mode at 0x7f739ad83d30>",
+ "__init__": "<function SACPolicy.__init__ at 0x7f530a866700>",
+ "_build": "<function SACPolicy._build at 0x7f530a866790>",
+ "_get_constructor_parameters": "<function SACPolicy._get_constructor_parameters at 0x7f530a866820>",
+ "reset_noise": "<function SACPolicy.reset_noise at 0x7f530a8668b0>",
+ "make_actor": "<function SACPolicy.make_actor at 0x7f530a866940>",
+ "make_critic": "<function SACPolicy.make_critic at 0x7f530a8669d0>",
+ "forward": "<function SACPolicy.forward at 0x7f530a866a60>",
+ "_predict": "<function SACPolicy._predict at 0x7f530a866af0>",
+ "set_training_mode": "<function SACPolicy.set_training_mode at 0x7f530a866b80>",
  "__abstractmethods__": "frozenset()",
- "_abc_impl": "<_abc_data object at 0x7f739ad84870>"
+ "_abc_impl": "<_abc_data object at 0x7f530a85cb70>"
  },
  "verbose": 1,
  "policy_kwargs": {
@@ -103,13 +103,13 @@
  ":serialized:": "gAWVNQAAAAAAAACMIHN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5idWZmZXJzlIwMUmVwbGF5QnVmZmVylJOULg==",
  "__module__": "stable_baselines3.common.buffers",
  "__doc__": "\n Replay buffer used in off-policy algorithms like SAC/TD3.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param n_envs: Number of parallel environments\n :param optimize_memory_usage: Enable a memory efficient variant\n of the replay buffer which reduces by almost a factor two the memory used,\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n and https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274\n Cannot be used in combination with handle_timeout_termination.\n :param handle_timeout_termination: Handle timeout termination (due to timelimit)\n separately and treat the task as infinite horizon task.\n https://github.com/DLR-RM/stable-baselines3/issues/284\n ",
- "__init__": "<function ReplayBuffer.__init__ at 0x7f739ad4d820>",
- "add": "<function ReplayBuffer.add at 0x7f739ad4d8b0>",
- "sample": "<function ReplayBuffer.sample at 0x7f739ad4d940>",
- "_get_samples": "<function ReplayBuffer._get_samples at 0x7f739ad4d9d0>",
- "_maybe_cast_dtype": "<staticmethod object at 0x7f739ad53580>",
+ "__init__": "<function ReplayBuffer.__init__ at 0x7f530a8b3700>",
+ "add": "<function ReplayBuffer.add at 0x7f530a8b3790>",
+ "sample": "<function ReplayBuffer.sample at 0x7f530a8b3820>",
+ "_get_samples": "<function ReplayBuffer._get_samples at 0x7f530a8b38b0>",
+ "_maybe_cast_dtype": "<staticmethod object at 0x7f530a8ad880>",
  "__abstractmethods__": "frozenset()",
- "_abc_impl": "<_abc_data object at 0x7f739ad535a0>"
+ "_abc_impl": "<_abc_data object at 0x7f530a8ad8a0>"
  },
  "replay_buffer_kwargs": {},
  "train_freq": {
sac-seals-Swimmer-v1/system_info.txt CHANGED
@@ -1,6 +1,6 @@
  - OS: Linux-5.4.0-156-generic-x86_64-with-glibc2.29 # 173-Ubuntu SMP Tue Jul 11 07:25:22 UTC 2023
  - Python: 3.8.10
- - Stable-Baselines3: 2.1.0
+ - Stable-Baselines3: 2.2.0a3
  - PyTorch: 2.0.1+cu117
  - GPU Enabled: False
  - Numpy: 1.24.4
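
system_info.txt is regenerated when the model is re-saved, so the Stable-Baselines3 entry now reflects the version used for the re-export (2.2.0a3). A sketch of reproducing this listing, assuming the `get_system_info` helper shipped with recent Stable-Baselines3 releases; the exact dictionary keys are an assumption based on the printed labels:

```python
# Print the same kind of environment summary that is stored in system_info.txt.
from stable_baselines3.common.utils import get_system_info

env_info_dict, env_info_str = get_system_info(print_info=False)
print(env_info_str)                         # human-readable listing (OS, Python, SB3, PyTorch, ...)
print(env_info_dict["Stable-Baselines3"])   # assumption: key matches the printed label, e.g. "2.2.0a3"
```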
train_eval_metrics.zip CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2fd65d60136370c24d24b1bbdd4d9b6ea00503f6a2dc4bb79758537234f8795e
+ oid sha256:bc0381fc9e16ccda3dd2904756e8ac0b4ebe97bff618441d0315930299befcad
  size 27986