me-in-u committed
Commit 4428be7 · 1 Parent(s): 0620f0f

Initial commit
README.md ADDED
@@ -0,0 +1,67 @@
---
library_name: stable-baselines3
tags:
- PandaPickAndPlace-v1
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: TQC
  results:
  - metrics:
    - type: mean_reward
      value: -13.20 +/- 10.05
      name: mean_reward
    task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: PandaPickAndPlace-v1
      type: PandaPickAndPlace-v1
---

# **TQC** Agent playing **PandaPickAndPlace-v1**

This is a trained model of a **TQC** agent playing **PandaPickAndPlace-v1**
using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3)
and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo).

The RL Zoo is a training framework for Stable Baselines3
reinforcement learning agents,
with hyperparameter optimization and pre-trained agents included.

## Usage (with SB3 RL Zoo)

RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/>
SB3: https://github.com/DLR-RM/stable-baselines3<br/>
SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib

```
# Download model and save it into the logs/ folder
python -m utils.load_from_hub --algo tqc --env PandaPickAndPlace-v1 -orga me-in-u -f logs/
python enjoy.py --algo tqc --env PandaPickAndPlace-v1 -f logs/
```
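
If you prefer to load the downloaded checkpoint directly with `sb3_contrib` instead of the RL Zoo's `enjoy.py`, a minimal sketch could look like the following. The `panda_gym` import and the exact checkpoint path under `logs/` are assumptions for illustration, not part of this repository.

```python
# Rough sketch: load the checkpoint outside the RL Zoo scripts.
# Assumptions: panda-gym is installed (it registers PandaPickAndPlace-v1),
# and the zip path below is illustrative -- adjust it to wherever
# load_from_hub actually placed the file under logs/.
import gym
import panda_gym  # noqa: F401  (assumed: registers the Panda envs)
from sb3_contrib import TQC
from sb3_contrib.common.wrappers import TimeFeatureWrapper

# The model was trained with TimeFeatureWrapper, so wrap the env the same way.
env = TimeFeatureWrapper(gym.make("PandaPickAndPlace-v1"))
model = TQC.load("logs/tqc/PandaPickAndPlace-v1_1/PandaPickAndPlace-v1.zip", env=env)

obs = env.reset()
for _ in range(1000):
    action, _states = model.predict(obs, deterministic=True)
    obs, reward, done, info = env.step(action)
    if done:
        obs = env.reset()
```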

## Training (with the RL Zoo)
```
python train.py --algo tqc --env PandaPickAndPlace-v1 -f logs/
# Upload the model and generate video (when possible)
python -m utils.push_to_hub --algo tqc --env PandaPickAndPlace-v1 -f logs/ -orga me-in-u
```

## Hyperparameters
```python
OrderedDict([('batch_size', 2048),
             ('buffer_size', 1000000),
             ('env_wrapper', 'sb3_contrib.common.wrappers.TimeFeatureWrapper'),
             ('gamma', 0.95),
             ('learning_rate', 0.001),
             ('n_timesteps', 1000000.0),
             ('policy', 'MultiInputPolicy'),
             ('policy_kwargs', 'dict(net_arch=[512, 512, 512], n_critics=2)'),
             ('replay_buffer_class', 'HerReplayBuffer'),
             ('replay_buffer_kwargs',
              "dict( online_sampling=True, goal_selection_strategy='future', "
              'n_sampled_goal=4, )'),
             ('tau', 0.05),
             ('normalize', False)])
```
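
For reference, here is a rough, hand-written sketch of how these Zoo entries map onto a direct `sb3_contrib` call, assuming the SB3 1.x API that the `online_sampling` setting implies. It illustrates the listed values; it is not the exact code `train.py` executes, and the `panda_gym` import is likewise an assumption.

```python
# Illustration of the hyperparameters above (not the RL Zoo's own training code).
import gym
import panda_gym  # assumed: provides PandaPickAndPlace-v1
from sb3_contrib import TQC
from sb3_contrib.common.wrappers import TimeFeatureWrapper
from stable_baselines3 import HerReplayBuffer

# env_wrapper: sb3_contrib.common.wrappers.TimeFeatureWrapper
env = TimeFeatureWrapper(gym.make("PandaPickAndPlace-v1"))

model = TQC(
    "MultiInputPolicy",              # policy
    env,
    learning_rate=0.001,
    buffer_size=1_000_000,
    batch_size=2048,
    tau=0.05,
    gamma=0.95,
    policy_kwargs=dict(net_arch=[512, 512, 512], n_critics=2),
    replay_buffer_class=HerReplayBuffer,
    replay_buffer_kwargs=dict(
        online_sampling=True,        # SB3 1.x HER option
        goal_selection_strategy="future",
        n_sampled_goal=4,
    ),
    verbose=1,
)
model.learn(total_timesteps=1_000_000)  # n_timesteps
```
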
results.json CHANGED
@@ -1 +1 @@
- {"mean_reward": -13.2, "std_reward": 10.047885349664377, "is_deterministic": true, "n_eval_episodes": 10, "eval_datetime": "2024-04-18T00:56:38.463820"}
+ {"mean_reward": -13.2, "std_reward": 10.047885349664377, "is_deterministic": true, "n_eval_episodes": 10, "eval_datetime": "2024-04-18T01:02:04.655690"}
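
The change to `results.json` only updates `eval_datetime`; the reward statistics match the model card above. For context, numbers like these are typically produced with SB3's `evaluate_policy` helper. A minimal sketch, with the environment setup assumed to mirror the card (not taken from this commit):

```python
# Sketch of a 10-episode deterministic evaluation, matching the fields in
# results.json (n_eval_episodes=10, is_deterministic=true).
import gym
import panda_gym  # assumed: registers PandaPickAndPlace-v1
from sb3_contrib import TQC
from sb3_contrib.common.wrappers import TimeFeatureWrapper
from stable_baselines3.common.evaluation import evaluate_policy

env = TimeFeatureWrapper(gym.make("PandaPickAndPlace-v1"))
model = TQC.load("tqc-PandaPickAndPlace-v1.zip", env=env)  # checkpoint from this repo

mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10, deterministic=True)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```
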
tqc-PandaPickAndPlace-v1.zip CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bd40a2359314920882b1615fe3943ded35ec8d89aae2c6743a23c365401e230b
+ oid sha256:88678aa5a36e7e22d3d9ea93ea3ed6ce971d644dc4616e3c22d34ccb4e62c64f
  size 24309869
tqc-PandaPickAndPlace-v1/data CHANGED
@@ -4,9 +4,9 @@
  ":serialized:": "gASVMQAAAAAAAACMGHNiM19jb250cmliLnRxYy5wb2xpY2llc5SMEE11bHRpSW5wdXRQb2xpY3mUk5Qu",
  "__module__": "sb3_contrib.tqc.policies",
  "__doc__": "\n Policy class (with both actor and critic) for TQC.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param sde_net_arch: Network architecture for extracting features\n when using gSDE. If None, the latent features from the policy will be used.\n Pass an empty list to use the states as features.\n :param use_expln: Use ``expln()`` function instead of ``exp()`` when using gSDE to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param clip_mean: Clip the mean output when using gSDE to avoid numerical instability.\n :param features_extractor_class: Features extractor to use.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n :param n_quantiles: Number of quantiles for the critic.\n :param n_critics: Number of critic networks to create.\n :param share_features_extractor: Whether to share or not the features extractor\n between the actor and the critic (this saves computation time)\n ",
- "__init__": "<function MultiInputPolicy.__init__ at 0x000001B4FC7A5CA8>",
+ "__init__": "<function MultiInputPolicy.__init__ at 0x0000021673966D38>",
  "__abstractmethods__": "frozenset()",
- "_abc_impl": "<_abc_data object at 0x000001B4FC7996C0>"
+ "_abc_impl": "<_abc_data object at 0x000002167395A6C0>"
  },
  "verbose": 1,
  "policy_kwargs": {
@@ -92,24 +92,24 @@
  ":serialized:": "gASVPwAAAAAAAACMJ3N0YWJsZV9iYXNlbGluZXMzLmhlci5oZXJfcmVwbGF5X2J1ZmZlcpSMD0hlclJlcGxheUJ1ZmZlcpSTlC4=",
  "__module__": "stable_baselines3.her.her_replay_buffer",
  "__doc__": "\n Hindsight Experience Replay (HER) buffer.\n Paper: https://arxiv.org/abs/1707.01495\n\n .. warning::\n\n For performance reasons, the maximum number of steps per episodes must be specified.\n In most cases, it will be inferred if you specify ``max_episode_steps`` when registering the environment\n or if you use a ``gym.wrappers.TimeLimit`` (and ``env.spec`` is not None).\n Otherwise, you can directly pass ``max_episode_length`` to the replay buffer constructor.\n\n\n Replay buffer for sampling HER (Hindsight Experience Replay) transitions.\n In the online sampling case, these new transitions will not be saved in the replay buffer\n and will only be created at sampling time.\n\n :param env: The training environment\n :param buffer_size: The size of the buffer measured in transitions.\n :param max_episode_length: The maximum length of an episode. If not specified,\n it will be automatically inferred if the environment uses a ``gym.wrappers.TimeLimit`` wrapper.\n :param goal_selection_strategy: Strategy for sampling goals for replay.\n One of ['episode', 'final', 'future']\n :param device: PyTorch device\n :param n_sampled_goal: Number of virtual transitions to create per real transition,\n by sampling new goals.\n :param handle_timeout_termination: Handle timeout termination (due to timelimit)\n separately and treat the task as infinite horizon task.\n https://github.com/DLR-RM/stable-baselines3/issues/284\n ",
- "__init__": "<function HerReplayBuffer.__init__ at 0x000001B4FB31DDC8>",
- "__getstate__": "<function HerReplayBuffer.__getstate__ at 0x000001B4FB31DE58>",
- "__setstate__": "<function HerReplayBuffer.__setstate__ at 0x000001B4FB31DEE8>",
- "set_env": "<function HerReplayBuffer.set_env at 0x000001B4FB31DF78>",
- "_get_samples": "<function HerReplayBuffer._get_samples at 0x000001B4FB321048>",
- "sample": "<function HerReplayBuffer.sample at 0x000001B4FB3210D8>",
- "_sample_offline": "<function HerReplayBuffer._sample_offline at 0x000001B4FB321168>",
- "sample_goals": "<function HerReplayBuffer.sample_goals at 0x000001B4FB3211F8>",
- "_sample_transitions": "<function HerReplayBuffer._sample_transitions at 0x000001B4FB321288>",
- "add": "<function HerReplayBuffer.add at 0x000001B4FB321318>",
- "store_episode": "<function HerReplayBuffer.store_episode at 0x000001B4FB3213A8>",
- "_sample_her_transitions": "<function HerReplayBuffer._sample_her_transitions at 0x000001B4FB321438>",
- "n_episodes_stored": "<property object at 0x000001B4FB3242C8>",
- "size": "<function HerReplayBuffer.size at 0x000001B4FB321558>",
- "reset": "<function HerReplayBuffer.reset at 0x000001B4FB3215E8>",
- "truncate_last_trajectory": "<function HerReplayBuffer.truncate_last_trajectory at 0x000001B4FB321678>",
+ "__init__": "<function HerReplayBuffer.__init__ at 0x00000216724DCE58>",
+ "__getstate__": "<function HerReplayBuffer.__getstate__ at 0x00000216724DCEE8>",
+ "__setstate__": "<function HerReplayBuffer.__setstate__ at 0x00000216724DCF78>",
+ "set_env": "<function HerReplayBuffer.set_env at 0x00000216724E2048>",
+ "_get_samples": "<function HerReplayBuffer._get_samples at 0x00000216724E20D8>",
+ "sample": "<function HerReplayBuffer.sample at 0x00000216724E2168>",
+ "_sample_offline": "<function HerReplayBuffer._sample_offline at 0x00000216724E21F8>",
+ "sample_goals": "<function HerReplayBuffer.sample_goals at 0x00000216724E2288>",
+ "_sample_transitions": "<function HerReplayBuffer._sample_transitions at 0x00000216724E2318>",
+ "add": "<function HerReplayBuffer.add at 0x00000216724E23A8>",
+ "store_episode": "<function HerReplayBuffer.store_episode at 0x00000216724E2438>",
+ "_sample_her_transitions": "<function HerReplayBuffer._sample_her_transitions at 0x00000216724E24C8>",
+ "n_episodes_stored": "<property object at 0x00000216724E4408>",
+ "size": "<function HerReplayBuffer.size at 0x00000216724E25E8>",
+ "reset": "<function HerReplayBuffer.reset at 0x00000216724E2678>",
+ "truncate_last_trajectory": "<function HerReplayBuffer.truncate_last_trajectory at 0x00000216724E2708>",
  "__abstractmethods__": "frozenset()",
- "_abc_impl": "<_abc_data object at 0x000001B4FB3118D0>"
+ "_abc_impl": "<_abc_data object at 0x00000216724D28D0>"
  },
  "replay_buffer_kwargs": {
  "online_sampling": true,