Dataset schema (column types and value ranges from the preview header):

| Column | Type | Range |
|---|---|---|
| `modelId` | string | length 4–81 |
| `tags` | list | |
| `pipeline_tag` | string | 17 classes |
| `config` | dict | |
| `downloads` | int64 | 0–59.7M |
| `first_commit` | timestamp[ns, tz=UTC] | |
| `card` | string | length 51–438k |
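Each record below lists, in order: `modelId`, `tags`, `pipeline_tag`, `config`, `downloads`, `first_commit`, and `card`. For orientation, here is a minimal sketch of loading and inspecting a dump with this schema via the `datasets` library; the dataset id is a hypothetical placeholder for wherever this preview was exported from.

```python
# Minimal sketch, assuming this preview comes from a Hugging Face Hub dataset
# with the schema above. The dataset id is a hypothetical placeholder.
from datasets import load_dataset

ds = load_dataset("some-org/hub-model-metadata", split="train")  # hypothetical id
row = ds[0]
print(row["modelId"], row["pipeline_tag"], row["downloads"])
print(row["tags"])        # e.g. ["pytorch", "roberta", "feature-extraction", ...]
print(row["card"][:200])  # model-card markdown, truncated for display
```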
AnonymousSub/rule_based_roberta_twostagequadruplet_hier_epochs_1_shard_1
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- tags: - Berzerk-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Berzerk-v5 type: Berzerk-v5 metrics: - type: mean_reward value: 979.00 +/- 208.01 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Berzerk-v5** This is a trained model of a PPO agent playing Berzerk-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --env-id Berzerk-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Berzerk-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed1/raw/main/cleanba_impala_envpool_impala_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Berzerk-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed1/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Berzerk-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed1/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_impala_atari_wrapper.py --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Berzerk-v5 --seed 1 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Berzerk-v5', 'exp_name': 'cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 1, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
AnonymousSub/rule_based_twostagetriplet_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- tags: - Assault-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Assault-v5 type: Assault-v5 metrics: - type: mean_reward value: 15962.70 +/- 5151.99 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Assault-v5** This is a trained model of a PPO agent playing Assault-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --env-id Assault-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Assault-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed1/raw/main/cleanba_impala_envpool_impala_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Assault-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed1/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Assault-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed1/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_impala_atari_wrapper.py --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Assault-v5 --seed 1 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Assault-v5', 'exp_name': 'cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 1, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
AnonymousSub/rule_based_twostagetriplet_epochs_1_shard_1_wikiqa
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
--- tags: - Centipede-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Centipede-v5 type: Centipede-v5 metrics: - type: mean_reward value: 7344.90 +/- 4582.18 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Centipede-v5** This is a trained model of a PPO agent playing Centipede-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --env-id Centipede-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Centipede-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed2/raw/main/cleanba_impala_envpool_impala_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Centipede-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed2/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Centipede-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed2/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_impala_atari_wrapper.py --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Centipede-v5 --seed 2 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Centipede-v5', 'exp_name': 'cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 2, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
AnonymousSub/rule_based_twostagetriplet_hier_epochs_1_shard_1_wikiqa
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
--- tags: - generated_from_trainer datasets: - HiTZ/alpaca_mt model-index: - name: alpaca-lora-65b-en-pt-es-ca results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # alpaca-lora-65b-en-pt-es-ca This model is a fine-tuned version of [/gaueko1/hizkuntza-ereduak/LLaMA/lm/huggingface/65B](https://huggingface.co//gaueko1/hizkuntza-ereduak/LLaMA/lm/huggingface/65B) on the HiTZ/alpaca_mt ['en', 'pt', 'es', 'ca'] dataset. It achieves the following results on the evaluation set: - Loss: 0.7271 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - distributed_type: multi-GPU - num_devices: 2 - gradient_accumulation_steps: 63 - total_train_batch_size: 126 - total_eval_batch_size: 2 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.03 - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 0.8069 | 0.06 | 100 | 0.8033 | | 0.8008 | 0.13 | 200 | 0.7826 | | 0.7687 | 0.19 | 300 | 0.7721 | | 0.7719 | 0.25 | 400 | 0.7647 | | 0.7585 | 0.32 | 500 | 0.7588 | | 0.7578 | 0.38 | 600 | 0.7537 | | 0.7505 | 0.44 | 700 | 0.7491 | | 0.7531 | 0.51 | 800 | 0.7449 | | 0.7394 | 0.57 | 900 | 0.7416 | | 0.7368 | 0.63 | 1000 | 0.7387 | | 0.7412 | 0.69 | 1100 | 0.7361 | | 0.7344 | 0.76 | 1200 | 0.7288 | | 0.7383 | 0.82 | 1300 | 0.7281 | | 0.7378 | 0.88 | 1400 | 0.7274 | | 0.7204 | 0.95 | 1500 | 0.7271 | ### Framework versions - Transformers 4.28.0.dev0 - Pytorch 2.0.0+cu117 - Datasets 2.10.1 - Tokenizers 0.13.2
AnonymousSub/specter-bert-model
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- tags: - DemonAttack-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: DemonAttack-v5 type: DemonAttack-v5 metrics: - type: mean_reward value: 131815.00 +/- 2301.71 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **DemonAttack-v5** This is a trained model of a PPO agent playing DemonAttack-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --env-id DemonAttack-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/DemonAttack-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed3/raw/main/cleanba_impala_envpool_impala_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/DemonAttack-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed3/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/DemonAttack-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed3/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_impala_atari_wrapper.py --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id DemonAttack-v5 --seed 3 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'DemonAttack-v5', 'exp_name': 'cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 3, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
ArBert/roberta-base-finetuned-ner-agglo-twitter
[ "pytorch", "tensorboard", "roberta", "token-classification", "transformers", "generated_from_trainer", "license:mit", "autotrain_compatible" ]
token-classification
{ "architectures": [ "RobertaForTokenClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- tags: - Seaquest-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Seaquest-v5 type: Seaquest-v5 metrics: - type: mean_reward value: 1760.00 +/- 15.49 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Seaquest-v5** This is a trained model of a PPO agent playing Seaquest-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --env-id Seaquest-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Seaquest-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed1/raw/main/cleanba_impala_envpool_impala_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Seaquest-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed1/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Seaquest-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed1/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_impala_atari_wrapper.py --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Seaquest-v5 --seed 1 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Seaquest-v5', 'exp_name': 'cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 1, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
AragornII/DialoGPT-small-harrypotter
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: ml-agents tags: - Pyramids - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Pyramids --- # **ppo** Agent playing **Pyramids** This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Pyramids 2. Step 1: Find your model_id: droid22/ppo-Pyramids 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
Aran/DialoGPT-medium-harrypotter
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
2023-03-25T16:02:28Z
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-SoccerTwos library_name: ml-agents --- # **poca** Agent playing **SoccerTwos** This is a trained model of a **poca** agent playing **SoccerTwos** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-SoccerTwos 2. Step 1: Write your model_id: nikgeo/poca-SoccerTwos 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
Aran/DialoGPT-small-harrypotter
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
2023-03-25T16:04:10Z
--- license: mit tags: - generated_from_trainer metrics: - bleu model-index: - name: opus-mt-en-mul-finetuned-en-to-lfn results: [] language: - en - lfn pipeline_tag: translation --- # opus-mt-en-mul-finetuned-en-to-lfn This model is a fine-tuned version of [Helsinki-NLP/opus-mt-en-mul](https://huggingface.co/Helsinki-NLP/opus-mt-en-mul) on the Tatoeba English-Elefen sentence pair dataset. It achieves the following results on the evaluation set: - Loss: 0.6208 - Bleu: 62.9717 - Gen Len: 11.5165 ## Model description Elefen (or Lingua Franca Nova, abbreviated to "LFN") is a simple language designed for international communication. Its vocabulary is based on Catalan, Spanish, French, Italian and Portuguese. The grammar is very reduced, similar to Romance creoles. ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.27.3 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
ArashEsk95/bert-base-uncased-finetuned-cola
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # {MODEL_NAME} This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') model = AutoModel.from_pretrained('{MODEL_NAME}') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 45 with parameters: ``` {'batch_size': 128, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss` Parameters of the fit()-Method: ``` { "epochs": 5, "evaluation_steps": 1000, "evaluator": "sentence_transformers.evaluation.EmbeddingSimilarityEvaluator.EmbeddingSimilarityEvaluator", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 23, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
AriakimTaiyo/DialoGPT-small-Kumiko
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
null
--- tags: - CrazyClimber-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CrazyClimber-v5 type: CrazyClimber-v5 metrics: - type: mean_reward value: 94300.00 +/- 24398.98 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **CrazyClimber-v5** This is a trained model of a PPO agent playing CrazyClimber-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id CrazyClimber-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/CrazyClimber-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/CrazyClimber-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/CrazyClimber-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id CrazyClimber-v5 --seed 3 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'CrazyClimber-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 3, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
Aries/T5_question_generation
[ "pytorch", "jax", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
13
2023-03-25T16:19:29Z
--- tags: - Berzerk-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Berzerk-v5 type: Berzerk-v5 metrics: - type: mean_reward value: 541.00 +/- 126.37 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Berzerk-v5** This is a trained model of a PPO agent playing Berzerk-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id Berzerk-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Berzerk-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed1/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Berzerk-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed1/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Berzerk-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed1/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Berzerk-v5 --seed 1 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Berzerk-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 1, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
ArjunKadya/HuggingFace
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-25T16:19:39Z
--- tags: - Berzerk-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Berzerk-v5 type: Berzerk-v5 metrics: - type: mean_reward value: 518.00 +/- 109.34 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Berzerk-v5** This is a trained model of a PPO agent playing Berzerk-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id Berzerk-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Berzerk-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Berzerk-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Berzerk-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Berzerk-v5 --seed 3 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Berzerk-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 3, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
ArnaudPannatier/MLPMixer
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - Centipede-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Centipede-v5 type: Centipede-v5 metrics: - type: mean_reward value: 3047.30 +/- 2437.79 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Centipede-v5** This is a trained model of a PPO agent playing Centipede-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id Centipede-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Centipede-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Centipede-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Centipede-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Centipede-v5 --seed 3 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Centipede-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 3, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
Arnold/common_voiceha
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - Centipede-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Centipede-v5 type: Centipede-v5 metrics: - type: mean_reward value: 2054.30 +/- 809.44 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Centipede-v5** This is a trained model of a PPO agent playing Centipede-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id Centipede-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Centipede-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed1/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Centipede-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed1/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Centipede-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed1/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Centipede-v5 --seed 1 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Centipede-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 1, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
Arnold/wav2vec2-hausa-demo-colab
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - Centipede-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Centipede-v5 type: Centipede-v5 metrics: - type: mean_reward value: 1585.70 +/- 580.83 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Centipede-v5** This is a trained model of a PPO agent playing Centipede-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id Centipede-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Centipede-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed2/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Centipede-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed2/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Centipede-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed2/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Centipede-v5 --seed 2 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Centipede-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 2, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
Arnold/wav2vec2-large-xlsr-turkish-demo-colab
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - BeamRider-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: BeamRider-v5 type: BeamRider-v5 metrics: - type: mean_reward value: 4463.00 +/- 1967.26 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **BeamRider-v5** This is a trained model of a PPO agent playing BeamRider-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id BeamRider-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/BeamRider-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed2/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/BeamRider-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed2/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/BeamRider-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed2/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id BeamRider-v5 --seed 2 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'BeamRider-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 2, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
Aron/distilbert-base-uncased-finetuned-emotion
[ "pytorch", "tensorboard", "distilbert", "text-classification", "dataset:emotion", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
36
null
--- tags: - DemonAttack-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: DemonAttack-v5 type: DemonAttack-v5 metrics: - type: mean_reward value: 57116.00 +/- 27738.72 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **DemonAttack-v5** This is a trained model of a PPO agent playing DemonAttack-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id DemonAttack-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/DemonAttack-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/DemonAttack-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/DemonAttack-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id DemonAttack-v5 --seed 3 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'DemonAttack-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 3, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
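The row ending above lists `Aron/distilbert-base-uncased-finetuned-emotion`, a DistilBertForSequenceClassification checkpoint tagged `text-classification` and trained on the `emotion` dataset. A minimal usage sketch with the standard transformers pipeline; the example sentence is an assumption, and the emotion label set is not recorded in this dump:

```python
# Minimal sketch for Aron/distilbert-base-uncased-finetuned-emotion.
# The input sentence is an assumption; labels come from the 'emotion' dataset
# referenced in the row's tags but are not listed in this dump.
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="Aron/distilbert-base-uncased-finetuned-emotion",
)
print(classifier("I am thrilled with how this turned out!"))
```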
Atarax/rick
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - Jamesbond-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Jamesbond-v5 type: Jamesbond-v5 metrics: - type: mean_reward value: 465.00 +/- 118.43 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Jamesbond-v5** This is a trained model of a PPO agent playing Jamesbond-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id Jamesbond-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Jamesbond-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed2/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Jamesbond-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed2/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Jamesbond-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed2/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Jamesbond-v5 --seed 2 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Jamesbond-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 2, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
Atchuth/DialoGPT-small-MBOT
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - Jamesbond-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Jamesbond-v5 type: Jamesbond-v5 metrics: - type: mean_reward value: 465.00 +/- 128.55 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Jamesbond-v5** This is a trained model of a PPO agent playing Jamesbond-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id Jamesbond-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Jamesbond-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Jamesbond-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Jamesbond-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Jamesbond-v5 --seed 3 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Jamesbond-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 3, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
Ateeb/SquadQA
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - Kangaroo-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Kangaroo-v5 type: Kangaroo-v5 metrics: - type: mean_reward value: 1600.00 +/- 282.84 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Kangaroo-v5** This is a trained model of a PPO agent playing Kangaroo-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id Kangaroo-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Kangaroo-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed1/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Kangaroo-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed1/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Kangaroo-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed1/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Kangaroo-v5 --seed 1 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Kangaroo-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 1, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
Augustvember/WokkaBot4
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - KungFuMaster-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: KungFuMaster-v5 type: KungFuMaster-v5 metrics: - type: mean_reward value: 19080.00 +/- 6065.28 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **KungFuMaster-v5** This is a trained model of a PPO agent playing KungFuMaster-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id KungFuMaster-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/KungFuMaster-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed2/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/KungFuMaster-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed2/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/KungFuMaster-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed2/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id KungFuMaster-v5 --seed 2 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'KungFuMaster-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 2, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
Augustvember/WokkaBot5
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - KungFuMaster-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: KungFuMaster-v5 type: KungFuMaster-v5 metrics: - type: mean_reward value: 25720.00 +/- 5122.27 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **KungFuMaster-v5** This is a trained model of a PPO agent playing KungFuMaster-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id KungFuMaster-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/KungFuMaster-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/KungFuMaster-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/KungFuMaster-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id KungFuMaster-v5 --seed 3 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'KungFuMaster-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 3, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
Aviora/phobert-ner
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-25T17:15:35Z
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-SoccerTwos library_name: ml-agents --- # **poca** Agent playing **SoccerTwos** This is a trained model of a **poca** agent playing **SoccerTwos** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-SoccerTwos 2. Write your model_id: droid22/poca-SoccerTwos 3. Select your *.nn or *.onnx file 4. Click on Watch the agent play 👀
Awsaf/DialoGPT-medium-eren
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- tags: - SpaceInvaders-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvaders-v5 type: SpaceInvaders-v5 metrics: - type: mean_reward value: 8762.50 +/- 5908.55 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **SpaceInvaders-v5** This is a trained model of a PPO agent playing SpaceInvaders-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id SpaceInvaders-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/SpaceInvaders-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed2/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/SpaceInvaders-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed2/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/SpaceInvaders-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed2/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id SpaceInvaders-v5 --seed 2 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'SpaceInvaders-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 2, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
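The row ending above lists `Awsaf/DialoGPT-medium-eren`, a GPT2LMHeadModel whose config caps conversational generation at max_length 1000. A minimal single-turn chat sketch; the user prompt is an assumption:

```python
# Minimal single-turn sketch for Awsaf/DialoGPT-medium-eren; the prompt is an
# assumption. DialoGPT-style models expect each turn terminated with EOS.
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("Awsaf/DialoGPT-medium-eren")
model = AutoModelForCausalLM.from_pretrained("Awsaf/DialoGPT-medium-eren")

input_ids = tok.encode("Hello, who are you?" + tok.eos_token, return_tensors="pt")
reply_ids = model.generate(
    input_ids,
    max_length=1000,                  # mirrors the conversational task param above
    pad_token_id=tok.eos_token_id,
)
print(tok.decode(reply_ids[0, input_ids.shape[-1]:], skip_special_tokens=True))
```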
Axcel/DialoGPT-small-rick
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
--- tags: - StarGunner-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: StarGunner-v5 type: StarGunner-v5 metrics: - type: mean_reward value: 66420.00 +/- 7673.43 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **StarGunner-v5** This is a trained model of a PPO agent playing StarGunner-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id StarGunner-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/StarGunner-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/StarGunner-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/StarGunner-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id StarGunner-v5 --seed 3 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'StarGunner-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 3, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
Axon/resnet18-v1
[ "dataset:ImageNet", "arxiv:1512.03385", "Axon", "Elixir", "license:apache-2.0" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-25T17:16:49Z
--- tags: - SpaceInvaders-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvaders-v5 type: SpaceInvaders-v5 metrics: - type: mean_reward value: 7318.50 +/- 6248.69 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **SpaceInvaders-v5** This is a trained model of a PPO agent playing SpaceInvaders-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id SpaceInvaders-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/SpaceInvaders-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/SpaceInvaders-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/SpaceInvaders-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id SpaceInvaders-v5 --seed 3 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'SpaceInvaders-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 3, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
Aybars/ModelOnWhole
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- tags: - Surround-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Surround-v5 type: Surround-v5 metrics: - type: mean_reward value: 5.30 +/- 2.69 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Surround-v5** This is a trained model of a PPO agent playing Surround-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id Surround-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Surround-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed1/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Surround-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed1/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Surround-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed1/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Surround-v5 --seed 1 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Surround-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 1, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
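The row ending above lists `Aybars/ModelOnWhole`, a BertForQuestionAnswering checkpoint tagged `question-answering`. A minimal extractive-QA sketch; the question and context are assumptions, and the dump does not record the model's training language or dataset:

```python
# Minimal sketch for Aybars/ModelOnWhole; the question and context strings
# are assumptions chosen only to illustrate the call.
from transformers import pipeline

qa = pipeline("question-answering", model="Aybars/ModelOnWhole")
result = qa(
    question="Who wrote the report?",
    context="The report was written by the audit team in March.",
)
print(result["answer"], result["score"])
```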
Ayham/albert_gpt2_summarization_cnndm
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- tags: - Venture-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Venture-v5 type: Venture-v5 metrics: - type: mean_reward value: 0.00 +/- 0.00 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Venture-v5** This is a trained model of a PPO agent playing Venture-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id Venture-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Venture-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed1/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Venture-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed1/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Venture-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed1/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Venture-v5 --seed 1 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Venture-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 1, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
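The row ending above lists `Ayham/albert_gpt2_summarization_cnndm`, an EncoderDecoderModel fine-tuned on cnn_dailymail. A minimal summarization sketch, assuming the repository ships a usable tokenizer and that its config defines the decoder start and EOS tokens; the article text and generation settings are assumptions:

```python
# Minimal sketch for Ayham/albert_gpt2_summarization_cnndm. Assumes the repo
# provides a tokenizer and decoder start/eos config; article text and beam
# settings are assumptions.
from transformers import AutoTokenizer, EncoderDecoderModel

tok = AutoTokenizer.from_pretrained("Ayham/albert_gpt2_summarization_cnndm")
model = EncoderDecoderModel.from_pretrained("Ayham/albert_gpt2_summarization_cnndm")

article = "(CNN) A long news article to be summarized goes here ..."
inputs = tok(article, return_tensors="pt", truncation=True)
summary_ids = model.generate(inputs.input_ids, max_length=64, num_beams=4)
print(tok.decode(summary_ids[0], skip_special_tokens=True))
```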
Ayham/ernie_gpt2_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- license: mit language: - en pipeline_tag: text2text-generation tags: - legal --- # flan-t5-cbp-lkg-small Google's Flan-T5 model ([flan-t5-small](https://huggingface.co/google/flan-t5-small)) trained over a Legal Knowledge Graph using the training method from [KGT-5](https://huggingface.co/spaces/apoorvumang/kgt5).
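A minimal sketch of querying a checkpoint like the one described above. Both the hub id and the verbalized prompt are assumptions: KGT-5-style training typically turns (entity, relation) pairs into text-to-text queries, but the card does not document the exact repository name or prompt format:

```python
# Hypothetical usage sketch; "org/flan-t5-cbp-lkg-small" and the prompt format
# are assumptions, since the card documents neither.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_id = "org/flan-t5-cbp-lkg-small"  # hypothetical hub id
tok = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

prompt = "predict tail: Supreme Court | has jurisdiction over |"
out = model.generate(**tok(prompt, return_tensors="pt"), max_new_tokens=32)
print(tok.decode(out[0], skip_special_tokens=True))
```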
Ayham/roberta_bert_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- tags: - LunarLander-v2 - ppo - deep-reinforcement-learning - reinforcement-learning - custom-implementation - deep-rl-course model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: -138.81 +/- 79.09 name: mean_reward verified: false --- # PPO Agent Playing LunarLander-v2 This is a trained model of a PPO agent playing LunarLander-v2. # Hyperparameters ```python {'exp_name': 'ppo' 'seed': 1 'torch_deterministic': True 'cuda': True 'track': False 'wandb_project_name': 'cleanRL' 'wandb_entity': None 'capture_video': False 'env_id': 'LunarLander-v2' 'total_timesteps': 50000 'learning_rate': 0.00025 'num_envs': 4 'num_steps': 128 'anneal_lr': True 'gae': True 'gamma': 0.99 'gae_lambda': 0.95 'num_minibatches': 4 'update_epochs': 4 'norm_adv': True 'clip_coef': 0.2 'clip_vloss': True 'ent_coef': 0.01 'vf_coef': 0.5 'max_grad_norm': 0.5 'target_kl': None 'repo_id': 'butchland/unit8-LunarLander-v2' 'batch_size': 512 'minibatch_size': 128} ```
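The derived sizes in this LunarLander-v2 run also check out; a quick computation, not part of the original card:

```python
# Derived from the LunarLander-v2 hyperparameters above; a consistency check
# only, not part of the original card.
num_envs, num_steps = 4, 128
batch_size = num_envs * num_steps    # 512, matches 'batch_size'
minibatch_size = batch_size // 4     # 128, matches 'minibatch_size' (4 minibatches)
updates = 50_000 // batch_size       # ~97 updates over 'total_timesteps'
print(batch_size, minibatch_size, updates)
```

At 50,000 total timesteps this is a very short training run, which is consistent with the negative mean reward reported above.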
Ayham/roberta_distilgpt2_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
2023-03-25T17:46:52Z
--- tags: - DoubleDunk-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: DoubleDunk-v5 type: DoubleDunk-v5 metrics: - type: mean_reward value: -5.40 +/- 4.10 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **DoubleDunk-v5** This is a trained model of a PPO agent playing DoubleDunk-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id DoubleDunk-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/DoubleDunk-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed1/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/DoubleDunk-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed1/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/DoubleDunk-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed1/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id DoubleDunk-v5 --seed 1 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'DoubleDunk-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 1, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
Ayham/roberta_gpt2_new_max64_summarization_cnndm
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: prueba1 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # prueba1 This model is a fine-tuned version of [PlanTL-GOB-ES/bsc-bio-ehr-es-pharmaconer](https://huggingface.co/PlanTL-GOB-ES/bsc-bio-ehr-es-pharmaconer) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.1842 - Precision: 0.7072 - Recall: 0.6255 - F1: 0.6638 - Accuracy: 0.9724 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3.5e-05 - train_batch_size: 32 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 32 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 29 | 0.1520 | 0.5625 | 0.6813 | 0.6162 | 0.9659 | | No log | 2.0 | 58 | 0.1552 | 0.6293 | 0.5817 | 0.6046 | 0.9686 | | No log | 3.0 | 87 | 0.1586 | 0.6667 | 0.5737 | 0.6167 | 0.9709 | | No log | 4.0 | 116 | 0.1595 | 0.6981 | 0.5896 | 0.6393 | 0.9722 | | No log | 5.0 | 145 | 0.1699 | 0.6729 | 0.5737 | 0.6194 | 0.9676 | | No log | 6.0 | 174 | 0.1753 | 0.6577 | 0.5817 | 0.6173 | 0.9689 | | No log | 7.0 | 203 | 0.1665 | 0.6540 | 0.6175 | 0.6352 | 0.9681 | | No log | 8.0 | 232 | 0.1792 | 0.7157 | 0.5618 | 0.6295 | 0.9712 | | No log | 9.0 | 261 | 0.1682 | 0.7048 | 0.5896 | 0.6421 | 0.9714 | | No log | 10.0 | 290 | 0.1732 | 0.7366 | 0.6016 | 0.6623 | 0.9724 | | No log | 11.0 | 319 | 0.1663 | 0.672 | 0.6693 | 0.6707 | 0.9725 | | No log | 12.0 | 348 | 0.1882 | 0.7071 | 0.5578 | 0.6236 | 0.9692 | | No log | 13.0 | 377 | 0.1825 | 0.7103 | 0.6056 | 0.6538 | 0.9710 | | No log | 14.0 | 406 | 0.1755 | 0.7164 | 0.5737 | 0.6372 | 0.9709 | | No log | 15.0 | 435 | 0.1950 | 0.6842 | 0.5697 | 0.6217 | 0.9689 | | No log | 16.0 | 464 | 0.1660 | 0.7240 | 0.6375 | 0.6780 | 0.9727 | | No log | 17.0 | 493 | 0.1833 | 0.7255 | 0.5896 | 0.6505 | 0.9724 | | 0.0061 | 18.0 | 522 | 0.1832 | 0.7190 | 0.6016 | 0.6551 | 0.9702 | | 0.0061 | 19.0 | 551 | 0.1762 | 0.6828 | 0.6175 | 0.6485 | 0.9707 | | 0.0061 | 20.0 | 580 | 0.1785 | 0.7346 | 0.6175 | 0.6710 | 0.9734 | | 0.0061 | 21.0 | 609 | 0.1791 | 0.7093 | 0.6414 | 0.6736 | 0.9739 | | 0.0061 | 22.0 | 638 | 0.1843 | 0.7476 | 0.6255 | 0.6811 | 0.9737 | | 0.0061 | 23.0 | 667 | 0.1837 | 0.7371 | 0.6255 | 0.6767 | 0.9734 | | 0.0061 | 24.0 | 696 | 0.1867 | 0.7176 | 0.6175 | 0.6638 | 0.9715 | | 0.0061 | 25.0 | 725 | 0.1844 | 0.7089 | 0.6016 | 0.6509 | 0.9710 | | 0.0061 | 26.0 | 754 | 0.1815 | 0.7072 | 0.6255 | 0.6638 | 0.9725 | | 0.0061 | 27.0 | 783 | 0.1822 | 0.7021 | 0.6574 | 0.6790 | 0.9737 | | 0.0061 | 28.0 | 812 | 0.1853 | 0.7048 | 0.6375 | 0.6695 | 0.9732 | | 0.0061 | 29.0 | 841 | 0.1845 | 0.7069 | 0.6534 | 0.6791 | 0.9735 | | 0.0061 | 30.0 | 870 | 0.1827 | 0.7004 | 0.6614 | 0.6803 | 0.9735 | | 0.0061 | 31.0 | 899 | 0.1850 | 0.7014 | 0.6175 | 0.6568 | 0.9719 | | 0.0061 | 32.0 | 928 | 0.1842 | 0.7072 | 0.6255 | 0.6638 | 0.9724 | ### Framework versions - Transformers 4.27.3 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
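The card above reports token-level precision/recall, so the checkpoint is a token-classification (NER) model. A minimal sketch: `prueba1` is the local run name from the card rather than a published hub id, so the base pharmaconer checkpoint the card names stands in here, and the Spanish example sentence is an assumption:

```python
# Minimal NER sketch. "prueba1" is not a hub id, so the base checkpoint named
# in the card is used purely to illustrate the call; the example sentence is
# an assumption.
from transformers import pipeline

ner = pipeline(
    "token-classification",
    model="PlanTL-GOB-ES/bsc-bio-ehr-es-pharmaconer",
    aggregation_strategy="simple",
)
print(ner("Se administró paracetamol 500 mg al paciente."))
```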
Ayham/roberta_gpt2_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
31
2023-03-25T17:47:38Z
--- tags: - UpNDown-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: UpNDown-v5 type: UpNDown-v5 metrics: - type: mean_reward value: 191595.00 +/- 74974.86 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **UpNDown-v5** This is a trained model of a PPO agent playing UpNDown-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id UpNDown-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/UpNDown-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed1/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/UpNDown-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed1/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/UpNDown-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed1/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id UpNDown-v5 --seed 1 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'UpNDown-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 1, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
Ayham/roberta_roberta_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
2023-03-25T17:47:41Z
--- tags: - VideoPinball-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: VideoPinball-v5 type: VideoPinball-v5 metrics: - type: mean_reward value: 570968.20 +/- 262194.52 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **VideoPinball-v5** This is a trained model of a PPO agent playing VideoPinball-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id VideoPinball-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/VideoPinball-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/VideoPinball-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/VideoPinball-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id VideoPinball-v5 --seed 3 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'VideoPinball-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 3, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
Ayham/robertagpt2_xsum
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
2023-03-25T17:49:04Z
--- tags: - UpNDown-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: UpNDown-v5 type: UpNDown-v5 metrics: - type: mean_reward value: 200052.00 +/- 60214.62 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **UpNDown-v5** This is a trained model of a PPO agent playing UpNDown-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id UpNDown-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/UpNDown-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/UpNDown-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/UpNDown-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id UpNDown-v5 --seed 3 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'UpNDown-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 3, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
Ayham/robertagpt2_xsum4
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- tags: - FishingDerby-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FishingDerby-v5 type: FishingDerby-v5 metrics: - type: mean_reward value: 25.60 +/- 14.53 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **FishingDerby-v5** This is a trained model of a PPO agent playing FishingDerby-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id FishingDerby-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/FishingDerby-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed2/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/FishingDerby-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed2/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/FishingDerby-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed2/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id FishingDerby-v5 --seed 2 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'FishingDerby-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 2, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
Ayham/xlmroberta_gpt2_summarization_xsum
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:xsum", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- tags: - Enduro-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Enduro-v5 type: Enduro-v5 metrics: - type: mean_reward value: 2241.30 +/- 284.69 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Enduro-v5** This is a trained model of a PPO agent playing Enduro-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id Enduro-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Enduro-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed1/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Enduro-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed1/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Enduro-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed1/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Enduro-v5 --seed 1 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Enduro-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 1, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
Ayham/xlmroberta_large_gpt2_summarization_cnndm
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- license: unknown --- # Alpaca (fine-tuned natively) 7B model download for Alpaca.cpp, Llama.cpp, and Dalai Mirrored version of https://huggingface.co/Sosaka/Alpaca-native-4bit-ggml, in case that one gets taken down. All credits go to Sosaka and chavinlo for creating the model: https://huggingface.co/chavinlo/alpaca-native
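The card above gives no inference snippet. As a rough illustration only: one way to run a ggml Alpaca file locally is through the llama-cpp-python bindings. The file name and prompt below are placeholders, and an old 4-bit ggml file may require a matching older build of llama.cpp rather than the current release:

```python
# Hypothetical local-inference sketch using the llama-cpp-python bindings;
# the model path and prompt are placeholders, and legacy ggml files may need
# an older, format-compatible llama.cpp build.
from llama_cpp import Llama

llm = Llama(model_path="ggml-alpaca-7b-q4.bin")  # path to the downloaded weights
out = llm(
    "### Instruction:\nExplain what a 4-bit quantized model is.\n\n### Response:\n",
    max_tokens=128,
)
print(out["choices"][0]["text"])
```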
Ayham/xlnet_distilgpt2_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- tags: - conversational --- # South Park DialoGPT Model
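The card names the model but gives no usage example. Below is a minimal chat-loop sketch in the style most DialoGPT cards use; the checkpoint id is a placeholder (microsoft/DialoGPT-medium), since the card does not state this model's actual repo id:

```python
# Minimal DialoGPT-style chat loop; swap the placeholder checkpoint id for the
# actual repo id of this model.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")

chat_history_ids = None
for step in range(3):
    # encode the user turn, appending the end-of-string token
    new_ids = tokenizer.encode(input(">> User: ") + tokenizer.eos_token, return_tensors="pt")
    bot_input_ids = new_ids if chat_history_ids is None else torch.cat([chat_history_ids, new_ids], dim=-1)
    # generate a response while keeping the running conversation as context
    chat_history_ids = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)
    print("Bot:", tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True))
```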
Ayham/xlnet_gpt2_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
2023-03-25T17:52:54Z
--- tags: - Enduro-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Enduro-v5 type: Enduro-v5 metrics: - type: mean_reward value: 2344.70 +/- 18.42 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Enduro-v5** This is a trained model of a PPO agent playing Enduro-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id Enduro-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Enduro-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed2/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Enduro-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed2/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Enduro-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed2/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Enduro-v5 --seed 2 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Enduro-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 2, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
Ayham/xlnet_gpt2_summarization_xsum
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:xsum", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
2023-03-25T17:53:49Z
--- tags: - Freeway-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Freeway-v5 type: Freeway-v5 metrics: - type: mean_reward value: 0.00 +/- 0.00 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Freeway-v5** This is a trained model of a PPO agent playing Freeway-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id Freeway-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Freeway-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed1/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Freeway-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed1/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Freeway-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed1/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Freeway-v5 --seed 1 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Freeway-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 1, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
Ayham/xlnet_gpt_xsum
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
null
--- tags: - Frostbite-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Frostbite-v5 type: Frostbite-v5 metrics: - type: mean_reward value: 334.00 +/- 33.53 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Frostbite-v5** This is a trained model of a PPO agent playing Frostbite-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id Frostbite-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Frostbite-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed1/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Frostbite-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed1/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Frostbite-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed1/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Frostbite-v5 --seed 1 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Frostbite-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 1, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
Ayham/xlnet_roberta_new_summarization_cnn_dailymail
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - Frostbite-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Frostbite-v5 type: Frostbite-v5 metrics: - type: mean_reward value: 314.00 +/- 18.00 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Frostbite-v5** This is a trained model of a PPO agent playing Frostbite-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id Frostbite-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Frostbite-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed2/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Frostbite-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed2/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Frostbite-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed2/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Frostbite-v5 --seed 2 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Frostbite-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 2, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
Ayham/xlnetgpt2_xsum7
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- tags: - Gopher-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Gopher-v5 type: Gopher-v5 metrics: - type: mean_reward value: 922.00 +/- 523.33 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Gopher-v5** This is a trained model of a PPO agent playing Gopher-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id Gopher-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Gopher-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed1/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Gopher-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed1/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Gopher-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed1/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Gopher-v5 --seed 1 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Gopher-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 1, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
Ayoola/wav2vec2-large-xlsr-turkish-demo-colab
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: afl-3.0 datasets: - fka/awesome-chatgpt-prompts language: - en metrics: - code_eval library_name: asteroid ---
Ayou/chinese_mobile_bert
[ "pytorch", "mobilebert", "fill-mask", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "MobileBertForMaskedLM" ], "model_type": "mobilebert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
16
null
--- tags: - Enduro-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Enduro-v5 type: Enduro-v5 metrics: - type: mean_reward value: 2317.90 +/- 109.39 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Enduro-v5** This is a trained model of a PPO agent playing Enduro-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id Enduro-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Enduro-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Enduro-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Enduro-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Enduro-v5 --seed 3 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Enduro-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 3, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
Ayran/DialoGPT-medium-harry-1
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: ml-agents tags: - Pyramids - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Pyramids --- # **ppo** Agent playing **Pyramids** This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub. ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Pyramids 2. Find your model_id: MarcusAGray/ppo-PyramidsTraining 3. Select your *.nn or *.onnx file 4. Click on Watch the agent play 👀
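If you download the exported policy instead of using the browser demo, a quick way to check its input/output signature is onnxruntime. The file name below is a placeholder for whatever *.onnx file the repo actually ships:

```python
# Inspect a downloaded ML-Agents ONNX policy (file name is a placeholder).
import onnxruntime as ort

sess = ort.InferenceSession("Pyramids.onnx")
for inp in sess.get_inputs():
    print("input: ", inp.name, inp.shape, inp.type)
for out in sess.get_outputs():
    print("output:", out.name, out.shape, out.type)
```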
Ayran/DialoGPT-medium-harry-potter-1-through-3
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- tags: - Freeway-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Freeway-v5 type: Freeway-v5 metrics: - type: mean_reward value: 0.00 +/- 0.00 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Freeway-v5** This is a trained model of a PPO agent playing Freeway-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id Freeway-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Freeway-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Freeway-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Freeway-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Freeway-v5 --seed 3 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Freeway-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 3, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
Ayran/DialoGPT-medium-harry-potter-1-through-4-plus-6-e18
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- tags: - Frostbite-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Frostbite-v5 type: Frostbite-v5 metrics: - type: mean_reward value: 321.00 +/- 28.79 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Frostbite-v5** This is a trained model of a PPO agent playing Frostbite-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id Frostbite-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Frostbite-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Frostbite-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Frostbite-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Frostbite-v5 --seed 3 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Frostbite-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 3, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
BSC-LT/RoBERTalex
[ "pytorch", "roberta", "fill-mask", "es", "dataset:legal_ES", "dataset:temu_legal", "arxiv:2110.12201", "transformers", "legal", "spanish", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
24
null
--- license: creativeml-openrail-m language: - ja - en tags: - stable-diffusion - text-to-image --- # EstrildaMix ![EstrildaMix v0.1](https://raw.githubusercontent.com/big-mon/estrildamix-images/main/img/EstrildaMix_v01/img1.png) A series of models is like a dark pot merging various models. --- ## Table of Contents - [License](#license) - [How to Use](#how-to-use-recommendation) - [EstrildaMix](#estrildamix-1) - v2 - v1 & v1b - v0.1 - [AdsimilisMix](#adsimilismix) - v2 - v1 & v1a & v1b & v1c --- ## License This model is open access and available to all, with a [CreativeML Open RAIL-M](https://huggingface.co/spaces/CompVis/stable-diffusion-license) license further specifying rights and usage. The main features of this model are It has the following main features: - You can not use the model to generate content for illegal/harmful purposes. - Licensor claims no rights in the Output You generate using the Model. You are accountable for the Output you generate and its subsequent uses. - You may reproduce and distribute copies of the Model or Derivatives of the Model, provided that You meet the following this license: Please read the full license. https://huggingface.co/spaces/CompVis/stable-diffusion-license --- ## How to Use (Recommendation) <details> <summary>Recommended Settings</summary> **Prompts** ``` 1girl, solo, high resolution, masterpiece, best quality, extremely detailed CG:0.9, illustration, ``` **Negative Prompts** ``` EasyNegative, bad anatomy, (worst quality, low quality:1.4), ((disfigured)), text:1.1, title, logo, signature, ``` _[EasyNegative](https://huggingface.co/datasets/gsdf/EasyNegative) is a negative embedding..._ **Parameters** - Sampling method: DPM++ SDE Karras - Sampling steps: 20 - Resolution: 512x768 or 768x512 - CFG Scale: 6 - Upscaler: R-ESRGAN 4x+ Anime6B - Denoising strength: 0.6 **VAE** - kl-f8-anime2.ckpt </details> --- --- ## EstrildaMix 理想の同級生を描けるようになるモデルを目指しています。 ### v1 & v1b ![Sample](https://raw.githubusercontent.com/big-mon/estrildamix-images/main/img/EstrildaMix_v1/img.png) 淡い雰囲気を残しつつ、背景や細部の書き込みに強いモデルなどマージしました。 v1b は v1 に対して #5 のマージを行っていないバージョンで、ほんの少しだけ淡いタッチが出やすいことと背景がシンプル気味になります。 #### Permission (Requests) - ✅ Use the model without crediting the creator - ✅ Sell images they generate - ✅ Run on services that generate images for money - ✅ Share merges using this model - ❌ Sell this model or merges using this model - ✅ Have different permissions when sharing merges These are requests, not a formal license. But I hope you will honor this request. #### Use Models & Recipe <details> <summary>Model details</summary> | Model | License | Remarks(Notices) | | ------------------ | --------------------- | --------------------------------------------- | | estrildaMix_v01 | CreativeML OpenRAIL M | ❌ Sell this model or merges using this model | | HighRiseMixV2.5 | CreativeML OpenRAIL M | | | Orion-Mix_Version2 | CreativeML OpenRAIL M | | | X-mix V2.0 | CreativeML OpenRAIL M | ❌ Sell this model or merges using this model | | Beauty 2.5D | CreativeML OpenRAIL M | | | Kawaii 2.5DV2 | CreativeML OpenRAIL M | | This merge is using "Checkpoint Merger" of AUTOMATIC1111. 
| # | Model A | Model B | Multiplier | Custom Name | | --: | --------------- | ------------------ | ---------- | -------------- | | 1 | estrildaMix_v01 | HighRiseMixV2.5 | 0.1 | ev01_hrv25_1 | | 2 | ev01_hrv25_1 | Orion-Mix_Version2 | 0.1 | ev01_om2_1 | | 3 | ev01_om2_1 | X-mix V2.0 | 0.1 | evom_x2_1_o | | 4 | evom_x2_1_o | Beauty 2.5D | 0.05 | evomx_b25v2_05 | | 5 | evomx_b25v2_05 | Kawaii 2.5DV2 | 0.1 | estrildaMix_v1 | - Interpolation Method : Weighted sum - Save as float16 : true - Bake in VAE : None (only #3 baked orangemix.vae.pt) - Copy config from : A,B,C </details> --- ### v0.1 ![Sample](https://raw.githubusercontent.com/big-mon/estrildamix-images/main/img/EstrildaMix_v01/img3.png) #### Examples ![Sample](https://raw.githubusercontent.com/big-mon/estrildamix-images/main/img/EstrildaMix_v01/img2.png) ``` 1girl, solo, high resolution, masterpiece, best quality, extremely detailed CG:0.9, illustration, classroom, sitting, long hair, brown hair BREAK white school cardigan BREAK black pantyhose BREAK black pleated skirt BREAK brown loafer BREAK green eye, Negative prompt: EasyNegative, bad anatomy, extra arms, (worst quality, low quality:1.4), ((disfigured)), text:1.1, title, logo, signature, nsfw, Steps: 20, Sampler: DPM++ SDE Karras, CFG scale: 6, Seed: 2389546383, Size: 768x512, Model hash: 410a70a422, Model: estrildaMix_v01, Denoising strength: 0.6, Clip skip: 2, Hires upscale: 2, Hires upscaler: R-ESRGAN 4x+ Anime6B ``` #### Permission (Requests) - ✅ Use the model without crediting the creator - ✅ Sell images they generate - ✅ Run on services that generate images for money - ✅ Share merges using this model - ❌ Sell this model or merges using this model - ✅ Have different permissions when sharing merges These are requests, not a formal license. But I hope you will honor this request. #### Use Models & Recipe <details> <summary>Model details</summary> | Model | License | Remarks(Notices) | | ------------------ | --------------------- | --------------------------------------------- | | viewer-mix_v1.7_v2 | CreativeML OpenRAIL M | ❌ Sell this model or merges using this model | | MeinaPastel - V4 | CreativeML OpenRAIL M | ❌ Sell this model or merges using this model | This merge is using "Checkpoint Merger" of AUTOMATIC1111. 
| Model A | Model B | Multiplier | Custom Name | | ------------------ | ---------------- | ---------- | --------------- | | viewer-mix_v1.7_v2 | MeinaPastel - V4 | 0.3 | estrildaMix_v01 | - Interpolation Method : Weighted sum - Save as float16 : true - Copy config from : B </details> --- ## AdsimilisMix 日本のアニメ、適度なリアルさ、可愛い女の子を目指しています。 ### v2 ![AdsimilisMix v1](https://raw.githubusercontent.com/big-mon/estrildamix-images/main/img/AdsimilisMix_v2/running.png) ``` 1girl, solo, outdoors, (slow motion:1.9), (motion blurred background):1.8, lens flare, morning, cityscape, looking at viewer, dutch angle, dash, running, sprint, a girl runs past, forward-bent posturem, teen, A girl running as fast as she can, school uniform, ponytail, school bag, white shirt, black socks, brown loafer, pleated skirt, motion blur Negative prompt: EasyNegative, bad anatomy, bad legs, extra legs, extra digits, nsfw, plump, from side Steps: 20, Sampler: DPM++ SDE Karras, CFG scale: 6, Seed: 3851457261, Size: 768x512, Denoising strength: 0.6, Clip skip: 2, Hires upscale: 2, Hires upscaler: R-ESRGAN 4x+ Anime6B ``` #### Permission (Requests) - ✅ Use the model without crediting the creator - ✅ Sell images they generate - ❌ Run on services that generate images for money - ✅ Share merges using this model - ❌ Sell this model or merges using this model - ✅ Have different permissions when sharing merges These are requests, not a formal license. But I hope you will honor this request. #### Use Models & Recipe <details> <summary>Model details</summary> | Model | License | Remarks(Notices) | | ---------------- | --------------------- | --------------------------------------------- | | AdsimilisMix v1a | CreativeML OpenRAIL M | | | Counterfeit-V2.5 | CreativeML OpenRAIL M | ❌ Sell this model or merges using this model | | # | Model A | Model B | Model C | Multiplier | Weights | Custom Name | | --: | ------------- | ---------------- | ------- | ------------ | -------------------------- | --------------------------- | | 1 | Pretty 2.5DV2 | Kawaii 2.5DV2 | N/A | Weighted sum | FLAT_25 | pk_flat25 | | 2 | pk_flat25 | Counterfeit-V2.5 | N/A | Weighted sum | FAKE_REVERSE_CUBIC_HERMITE | pkf_fakeReverseCubicHermite | rename to "adsimilisMix_v2" </details> --- ### v1 & v1a & v1b & v1c **v1** ![AdsimilisMix v1](https://raw.githubusercontent.com/big-mon/estrildamix-images/main/img/AdsimilisMix_v1/batch4.png) **v1a, v1b, v1c** ![AdsimilisMix v1](https://raw.githubusercontent.com/big-mon/estrildamix-images/main/img/AdsimilisMix_v1/amv1abc.png) #### Permission (Requests) - ✅ Use the model without crediting the creator - ❌ Sell images they generate - ❌ Run on services that generate images for money - ✅ Share merges using this model - ❌ Sell this model or merges using this model - ✅ Have different permissions when sharing merges These are requests, not a formal license. But I hope you will honor this request. 
#### Use Models & Recipe

<details>
<summary>Model details</summary>

| Model | License | Remarks (Notices) |
| ------------- | --------------------- | ---------------- |
| Beauty 2.5D | CreativeML OpenRAIL M | |
| Kawaii 2.5DV2 | CreativeML OpenRAIL M | |
| Pretty 2.5DV2 | CreativeML OpenRAIL M | |

##### v1

| # | Model A | Model B | Model C | Multiplier | Weights | Custom Name |
| --: | -------------- | ------------- | ------- | ------------------ | ------- | --------------- |
| 1 | Beauty 2.5D | Kawaii 2.5DV2 | N/A | Weighted sum @ 0.9 | | beautyKawaii09 |
| 2 | beautyKawaii09 | Pretty 2.5DV2 | N/A | Weighted sum @ 0.4 | | adsimilisMix_v1 |

##### v1a

| # | Model A | Model B | Model C | Multiplier | Weights | Custom Name |
| --: | ------------- | ------------- | ------- | ------------ | ------- | ----------- |
| 1 | Pretty 2.5DV2 | Kawaii 2.5DV2 | N/A | Weighted sum | FLAT_25 | pk_flat25 |

rename to "adsimilisMix_v1a"

##### v1b

| # | Model A | Model B | Model C | Multiplier | Weights | Custom Name |
| --: | ------------- | ------------- | ------- | ------------ | ------- | ----------- |
| 1 | Pretty 2.5DV2 | Kawaii 2.5DV2 | N/A | Weighted sum | WRAP08 | pk_wrap08 |

rename to "adsimilisMix_v1b"

##### v1c

| # | Model A | Model B | Model C | Multiplier | Weights | Custom Name |
| --: | ------------- | ------------- | ------- | ------------ | -------------- | --------------- |
| 1 | Pretty 2.5DV2 | Kawaii 2.5DV2 | N/A | Weighted sum | R_SMOOTHSTEP/2 | pk_2rSmoothstep |

rename to "adsimilisMix_v1c"

</details>

---
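For readers unfamiliar with what these recipes do mechanically, below is a rough sketch, not the author's tooling, of the "Weighted sum" operation that AUTOMATIC1111's Checkpoint Merger performs: `merged = A * (1 - multiplier) + B * multiplier` over matching tensors. The file names are hypothetical placeholders, and the per-block weight presets used in v1a–v1c (FLAT_25, WRAP08, R_SMOOTHSTEP/2) are *not* modeled here; those apply different multipliers per U-Net block.

```python
# Rough sketch of "Weighted sum" checkpoint merging. File names are
# hypothetical; real use should go through AUTOMATIC1111's Checkpoint
# Merger tab, which also handles fp16 saving and VAE baking.
import torch

def weighted_sum_merge(path_a: str, path_b: str, multiplier: float, out_path: str) -> None:
    ckpt_a = torch.load(path_a, map_location="cpu")
    ckpt_b = torch.load(path_b, map_location="cpu")
    # SD checkpoints usually nest their weights under a "state_dict" key
    sd_a = ckpt_a.get("state_dict", ckpt_a)
    sd_b = ckpt_b.get("state_dict", ckpt_b)
    merged = {}
    for key, tensor_a in sd_a.items():
        if key in sd_b and torch.is_tensor(tensor_a):
            merged[key] = (1.0 - multiplier) * tensor_a + multiplier * sd_b[key]
        else:
            merged[key] = tensor_a  # keys missing from B are copied from A
    torch.save({"state_dict": merged}, out_path)

# e.g. step 1 of the v1 recipe above (hypothetical file names):
weighted_sum_merge("Beauty25D.ckpt", "Kawaii25DV2.ckpt", 0.9, "beautyKawaii09.ckpt")
```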
Bakkes/BakkesModWiki
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
license: apache-2.0
tags:
- summarization
- generated_from_trainer
model-index:
- name: output-tfg
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# output-tfg

This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on an unspecified dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 1
- eval_batch_size: 1
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 4
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2
- mixed_precision_training: Native AMP

### Framework versions

- Transformers 4.27.3
- Pytorch 1.13.1+cu116
- Datasets 2.10.1
- Tokenizers 0.13.2
Bala/model_name
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
tags:
- unity-ml-agents
- ml-agents
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-SoccerTwos
library_name: ml-agents
---

# **poca** Agent playing **SoccerTwos**

This is a trained model of a **poca** agent playing **SoccerTwos** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)

The Documentation: https://github.com/huggingface/ml-agents#get-started

We also wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub.

### Resume the training

```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play

You can watch your agent **playing directly in your browser**:

1. Go to https://huggingface.co/spaces/unity/ML-Agents-SoccerTwos
2. Write your model_id: golightly/poca-SoccerTwos
3. Select your *.nn / *.onnx file
4. Click on Watch the agent play 👀
BalajiSathesh/DialoGPT-small-harrypotter
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
---
library_name: stable-baselines3
tags:
- AntBulletEnv-v0
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: A2C
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: AntBulletEnv-v0
      type: AntBulletEnv-v0
    metrics:
    - type: mean_reward
      value: 851.30 +/- 36.85
      name: mean_reward
      verified: false
---

# **A2C** Agent playing **AntBulletEnv-v0**

This is a trained model of an **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)

TODO: Add your code

```python
from stable_baselines3 import ...
from huggingface_sb3 import load_from_hub
...
```
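Until the TODO above is filled in, here is a minimal usage sketch, assuming the checkpoint was pushed with `huggingface_sb3` as a `.zip`; the repo id and filename below are hypothetical placeholders, not the actual location of this model.

```python
# Minimal sketch (assumptions: the model was saved as a .zip via huggingface_sb3,
# and pybullet is installed so AntBulletEnv-v0 is registered with gym).
import gym
import pybullet_envs  # noqa: F401 -- side-effect import registers AntBulletEnv-v0
from huggingface_sb3 import load_from_hub
from stable_baselines3 import A2C

checkpoint = load_from_hub(
    repo_id="<user>/a2c-AntBulletEnv-v0",  # hypothetical repo id
    filename="a2c-AntBulletEnv-v0.zip",    # hypothetical filename
)
model = A2C.load(checkpoint)

env = gym.make("AntBulletEnv-v0")
obs = env.reset()
for _ in range(1000):
    action, _ = model.predict(obs, deterministic=True)
    obs, reward, done, info = env.step(action)
    if done:
        obs = env.reset()
```

Note that many AntBulletEnv checkpoints are trained with `VecNormalize`; if so, the saved observation statistics would also need to be loaded, which this sketch omits.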
BatuhanYilmaz/dummy
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
library_name: sample-factory
tags:
- deep-reinforcement-learning
- reinforcement-learning
- sample-factory
model-index:
- name: APPO
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: doom_health_gathering_supreme
      type: doom_health_gathering_supreme
    metrics:
    - type: mean_reward
      value: 9.86 +/- 3.98
      name: mean_reward
      verified: false
---

An **APPO** model trained on the **doom_health_gathering_supreme** environment.

This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory. Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/

## Downloading the model

After installing Sample-Factory, download the model with:

```
python -m sample_factory.huggingface.load_from_hub -r YoanG/rl_course_vizdoom_health_gathering_supreme
```

## Using the model

To run the model after download, use the `enjoy` script corresponding to this environment:

```
python -m sf_examples.vizdoom.enjoy_vizdoom --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme
```

You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag. See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details.

## Training with this model

To continue training with this model, use the `train` script corresponding to this environment:

```
python -m sf_examples.vizdoom.train_vizdoom --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000
```

Note: you may have to adjust `--train_for_env_steps` to a suitably high number, as the experiment will resume from the step count at which it previously concluded.
Beatriz/model_name
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - MontezumaRevenge-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: MontezumaRevenge-v5 type: MontezumaRevenge-v5 metrics: - type: mean_reward value: 0.00 +/- 0.00 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **MontezumaRevenge-v5** This is a trained model of a PPO agent playing MontezumaRevenge-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id MontezumaRevenge-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/MontezumaRevenge-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed2/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/MontezumaRevenge-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed2/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/MontezumaRevenge-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed2/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id MontezumaRevenge-v5 --seed 2 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'MontezumaRevenge-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 2, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
Bee-Garbs/DialoGPT-real-cartman-small
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- tags: - ChopperCommand-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: ChopperCommand-v5 type: ChopperCommand-v5 metrics: - type: mean_reward value: 11510.00 +/- 4084.23 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **ChopperCommand-v5** This is a trained model of a PPO agent playing ChopperCommand-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id ChopperCommand-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/ChopperCommand-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/ChopperCommand-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/ChopperCommand-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed3/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id ChopperCommand-v5 --seed 3 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'ChopperCommand-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 3, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
Beelow/model
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
library_name: stable-baselines3
tags:
- AntBulletEnv-v0
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: A2C
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: AntBulletEnv-v0
      type: AntBulletEnv-v0
    metrics:
    - type: mean_reward
      value: 1760.70 +/- 86.57
      name: mean_reward
      verified: false
---

# **A2C** Agent playing **AntBulletEnv-v0**

This is a trained model of an **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)

TODO: Add your code

```python
from stable_baselines3 import ...
from huggingface_sb3 import load_from_hub
...
```
Benicio/t5-small-finetuned-en-to-ro
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
More information will follow, but this is how to generate text with the model.

You will need to install:

```bash
pip install peft
```

To run in Python:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftConfig, PeftModelForCausalLM

peft_model_id = 'GrantC/alpaca-opt-1.3b-lora'
BASE_MODEL = 'facebook/opt-1.3b'

# Load the adapter config, the base OPT model, and its tokenizer
config = PeftConfig.from_pretrained(peft_model_id)
model = AutoModelForCausalLM.from_pretrained(BASE_MODEL)
tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)

# Wrap the base model with the LoRA adapter weights
model = PeftModelForCausalLM.from_pretrained(model, peft_model_id, device_map="auto")

prompt = "Write a blog post about shaving cream:"
print(prompt)

inputs = tokenizer(prompt, return_tensors='pt')
output = model.generate(input_ids=inputs["input_ids"],
                        do_sample=True,
                        penalty_alpha=0.6,
                        top_k=4,
                        max_new_tokens=256)
outputs = tokenizer.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
print(outputs)
```
BertChristiaens/EmojiPredictor
[ "pytorch", "distilbert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "DistilBertForTokenClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
2023-03-25T22:02:58Z
---
license: creativeml-openrail-m
tags:
- stable-diffusion
- text-to-image
datasets: embed/EasyNegative
---

## Description

This model is for reproducing delicate, beautiful flat-color ligne claire style anime pictures. You can use tags like `ligne claire`, `lineart` or `monochrome` etc. to get more styles!

## Recommended settings

- VAE: Orangemix / Anything V4.5 / NAI
- Sampler: DPM++ 2M Karras
- Sampling steps: 20
- Negative embeddings: [EasyNegative](https://civitai.com/models/7808), [badhandv4](https://civitai.com/models/16993/badhandv4-animeillustdiffusion)

## Samples

See: https://civitai.com/models/24387

## Models used

Merged with block weights tweaked:

- 2020s Anime Magazine Illustration Style
- Anime Lineart Style
- Avas Anime Hamster
- Beautiful Detailed Eyes
- Chillout Mix
- Epi Noise Offset
- Hipoly 3D Model
- Ligne Claire Anime Style
- Makoto Shinkai Substyles
- Mika Pikazo Style
- Pastel Mix Stylized Anime
- Tabi Art Style
- Thicker Lines Anime Style Mix
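The recommended settings above map onto `diffusers` roughly as follows. This is a minimal sketch, not from the model card: the repo id is a hypothetical placeholder, and `DPMSolverMultistepScheduler` with Karras sigmas approximates the "DPM++ 2M Karras" sampler.

```python
# Sketch only: "<user>/<this-model>" is a hypothetical repo id, and the
# negative embeddings (EasyNegative, badhandv4) are textual inversions that
# must be loaded separately before they have any effect in prompts.
import torch
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler

pipe = StableDiffusionPipeline.from_pretrained(
    "<user>/<this-model>",  # hypothetical repo id
    torch_dtype=torch.float16,
)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(
    pipe.scheduler.config, use_karras_sigmas=True  # ~ DPM++ 2M Karras
)
pipe = pipe.to("cuda")

image = pipe(
    "1girl, ligne claire, lineart, masterpiece",
    negative_prompt="EasyNegative, badhandv4",
    num_inference_steps=20,
).images[0]
image.save("sample.png")
```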
Biasface/DDDC
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
---
{}
---

LoRA weights for LLaMA-7b trained on a subset of the [Stanford Alpaca](https://github.com/tatsu-lab/stanford_alpaca) dataset in which the long tail of lengthy entries is removed and the prompt is shortened to the following:

```
Appropriately respond to the following instruction:

### Instruction:
Write a javascript function that sorts array alphabetically

### Response:
```

It doesn't contain the foundation model itself, so it's MIT licensed!

Tuned using https://github.com/lxe/simple-llama-finetuner
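A minimal loading sketch for adapter weights like these, assuming they were saved with PEFT; the base checkpoint name and the LoRA repo id below are hypothetical placeholders, and you need access to converted LLaMA-7b weights yourself.

```python
# Sketch only: base checkpoint and adapter repo ids are placeholders.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

BASE = "<path-or-repo-of-llama-7b-hf>"  # hypothetical: converted HF LLaMA-7b weights
LORA = "<user>/<this-lora-repo>"        # hypothetical: where these adapter weights live

tokenizer = AutoTokenizer.from_pretrained(BASE)
model = AutoModelForCausalLM.from_pretrained(BASE, device_map="auto")
model = PeftModel.from_pretrained(model, LORA)  # apply the LoRA adapter

# Use the shortened prompt format described above
prompt = (
    "Appropriately respond to the following instruction:\n\n"
    "### Instruction:\nWrite a javascript function that sorts array alphabetically\n\n"
    "### Response:\n"
)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```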
BigSalmon/FormalBerta
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
2023-03-25T22:38:11Z
---
library_name: stable-baselines3
tags:
- AntBulletEnv-v0
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: A2C
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: AntBulletEnv-v0
      type: AntBulletEnv-v0
    metrics:
    - type: mean_reward
      value: 1033.94 +/- 46.09
      name: mean_reward
      verified: false
---

# **A2C** Agent playing **AntBulletEnv-v0**

This is a trained model of an **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)

TODO: Add your code

```python
from stable_baselines3 import ...
from huggingface_sb3 import load_from_hub
...
```
BigSalmon/FormalBerta3
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
2023-03-25T22:39:05Z
--- tags: - Riverraid-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Riverraid-v5 type: Riverraid-v5 metrics: - type: mean_reward value: 3498.00 +/- 125.76 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Riverraid-v5** This is a trained model of a PPO agent playing Riverraid-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id Riverraid-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Riverraid-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed10/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Riverraid-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed10/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Riverraid-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed10/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Riverraid-v5 --seed 10 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Riverraid-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 10, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
BigSalmon/FormalRobertaa
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: apache-2.0 tags: - text-classification - generated_from_trainer datasets: - glue metrics: - accuracy - f1 widget: - text: ["Yucaipa owned Dominick 's before selling the chain to Safeway in 1998 for $ 2.5 billion.", "Yucaipa bought Dominick's in 1995 for $ 693 million and sold it to Safeway for $ 1.8 billion in 1998."] example_title: Not Equivalent - text: ["Revenue in the first quarter of the year dropped 15 percent from the same period a year earlier.", "With the scandal hanging over Stewart's company revenue the first quarter of the year dropped 15 percent from the same period a year earlier."] example_title: Equivalent model-index: - name: platzi-distingroberta-base-mrpc-glue-pixelciosa results: - task: name: Text Classification type: text-classification dataset: name: glue type: glue config: mrpc split: validation args: mrpc metrics: - name: Accuracy type: accuracy value: 0.8455882352941176 - name: F1 type: f1 value: 0.8919382504288165 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # platzi-distingroberta-base-mrpc-glue-pixelciosa This model is a fine-tuned version of [distilroberta-base](https://huggingface.co/distilroberta-base) on the glue and the mrpc datasets. It achieves the following results on the evaluation set: - Loss: 0.4939 - Accuracy: 0.8456 - F1: 0.8919 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.5223 | 1.09 | 500 | 0.4939 | 0.8456 | 0.8919 | | 0.375 | 2.18 | 1000 | 0.6612 | 0.8407 | 0.8873 | | 0.1932 | 3.27 | 1500 | 0.7584 | 0.8627 | 0.9011 | ### Framework versions - Transformers 4.27.3 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
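Since the card does not state the checkpoint's Hub location, here is a minimal inference sketch with a hypothetical repo id, showing how one of the widget examples above would be scored. MRPC is a sentence-pair task, so both sentences are passed together.

```python
# Sketch: the model id is a hypothetical placeholder for wherever this
# fine-tuned checkpoint is published on the Hub.
from transformers import pipeline

clf = pipeline(
    "text-classification",
    model="<user>/platzi-distingroberta-base-mrpc-glue-pixelciosa",  # hypothetical
)

# Pass the sentence pair as a dict with "text" and "text_pair"
result = clf({
    "text": "Revenue in the first quarter of the year dropped 15 percent "
            "from the same period a year earlier.",
    "text_pair": "With the scandal hanging over Stewart's company revenue the "
                 "first quarter of the year dropped 15 percent from the same "
                 "period a year earlier.",
})
print(result)  # label names depend on the config's id2label mapping
```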
BigSalmon/GPTIntro
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - samsum metrics: - rouge model-index: - name: flan-t5-base-samsum results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: samsum type: samsum config: samsum split: test args: samsum metrics: - name: Rouge1 type: rouge value: 47.698 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # flan-t5-base-samsum This model is a fine-tuned version of [google/flan-t5-base](https://huggingface.co/google/flan-t5-base) on the samsum dataset. It achieves the following results on the evaluation set: - Loss: 1.3721 - Rouge1: 47.698 - Rouge2: 23.8078 - Rougel: 40.1138 - Rougelsum: 43.7749 - Gen Len: 17.2759 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:| | 1.4403 | 1.0 | 1842 | 1.3822 | 47.3182 | 23.8486 | 39.7145 | 43.5756 | 17.0256 | | 1.3572 | 2.0 | 3684 | 1.3747 | 47.5891 | 23.6341 | 39.7983 | 43.6862 | 17.4347 | | 1.2822 | 3.0 | 5526 | 1.3721 | 47.698 | 23.8078 | 40.1138 | 43.7749 | 17.2759 | | 1.2375 | 4.0 | 7368 | 1.3764 | 47.7671 | 24.1413 | 40.1597 | 43.9313 | 17.2943 | | 1.1935 | 5.0 | 9210 | 1.3781 | 47.626 | 23.7564 | 39.844 | 43.7166 | 17.3077 | ### Framework versions - Transformers 4.27.3 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
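A quick way to try the checkpoint once it is on the Hub. The repo id below is a hypothetical placeholder and the dialogue is an invented example in the SAMSum style:

```python
from transformers import pipeline

# Hypothetical repo id: substitute the actual location of this checkpoint.
summarizer = pipeline("summarization", model="<user>/flan-t5-base-samsum")

dialogue = (
    "Anna: Are we still on for lunch tomorrow?\n"
    "Ben: Yes! 12:30 at the usual place?\n"
    "Anna: Perfect, see you there."
)
print(summarizer(dialogue, max_length=50)[0]["summary_text"])
```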
BigSalmon/GPTNeo350MInformalToFormalLincoln
[ "pytorch", "gpt_neo", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPTNeoForCausalLM" ], "model_type": "gpt_neo", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
---
library_name: ml-agents
tags:
- Pyramids
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-Pyramids
---

# **ppo** Agent playing **Pyramids**

This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)

The Documentation: https://github.com/huggingface/ml-agents#get-started

We also wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub.

### Resume the training

```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play

You can watch your agent **playing directly in your browser**:

1. Go to https://huggingface.co/spaces/unity/ML-Agents-Pyramids
2. Find your model_id: harshil128/ML-Agents-Pyramids
3. Select your *.nn / *.onnx file
4. Click on Watch the agent play 👀
BigSalmon/InformalToFormalLincoln25
[ "pytorch", "gpt2", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
2023-03-25T23:29:13Z
---
library_name: ml-agents
tags:
- SnowballTarget
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-SnowballTarget
---

# **ppo** Agent playing **SnowballTarget**

This is a trained model of a **ppo** agent playing **SnowballTarget** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)

The Documentation: https://github.com/huggingface/ml-agents#get-started

We also wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub.

### Resume the training

```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play

You can watch your agent **playing directly in your browser**:

1. Go to https://huggingface.co/spaces/unity/ML-Agents-SnowballTarget
2. Find your model_id: clemdev2000/ppo-SnowballTarget
3. Select your *.nn / *.onnx file
4. Click on Watch the agent play 👀
BigSalmon/MrLincoln
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2023-03-25T23:43:30Z
---
library_name: ml-agents
tags:
- Pyramids
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-Pyramids
---

# **ppo** Agent playing **Pyramids**

This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)

The Documentation: https://github.com/huggingface/ml-agents#get-started

We also wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub.

### Resume the training

```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play

You can watch your agent **playing directly in your browser**:

1. Go to https://huggingface.co/spaces/unity/ML-Agents-Pyramids
2. Find your model_id: clemdev2000/MLAgents-Pyramids
3. Select your *.nn / *.onnx file
4. Click on Watch the agent play 👀
BigSalmon/MrLincoln10
[ "pytorch", "tensorboard", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
2023-03-25T23:47:32Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 262.85 +/- 24.12 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
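Pending the TODO above, a minimal evaluation sketch, again assuming the checkpoint was pushed with `huggingface_sb3`; the repo id and filename are hypothetical placeholders.

```python
# Sketch only: repo id and filename below are hypothetical placeholders.
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

checkpoint = load_from_hub(
    repo_id="<user>/ppo-LunarLander-v2",  # hypothetical
    filename="ppo-LunarLander-v2.zip",    # hypothetical
)
model = PPO.load(checkpoint)

env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10, deterministic=True)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```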
BigSalmon/MrLincoln11
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
2023-03-25T23:48:12Z
---
library_name: stable-baselines3
tags:
- PandaReachDense-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: A2C
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: PandaReachDense-v2
      type: PandaReachDense-v2
    metrics:
    - type: mean_reward
      value: -1.49 +/- 0.21
      name: mean_reward
      verified: false
---

# **A2C** Agent playing **PandaReachDense-v2**

This is a trained model of an **A2C** agent playing **PandaReachDense-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)

TODO: Add your code

```python
from stable_baselines3 import ...
from huggingface_sb3 import load_from_hub
...
```
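As with the other SB3 cards here, a minimal sketch until the TODO is filled in. Assumptions: the checkpoint was pushed with `huggingface_sb3`, and a gym-era `panda-gym` release is installed so that `PandaReachDense-v2` is registered; the repo id and filename are hypothetical placeholders.

```python
# Sketch only: repo id and filename are hypothetical placeholders.
import gym
import panda_gym  # noqa: F401 -- side-effect import registers the Panda envs
from huggingface_sb3 import load_from_hub
from stable_baselines3 import A2C

checkpoint = load_from_hub(
    repo_id="<user>/a2c-PandaReachDense-v2",  # hypothetical
    filename="a2c-PandaReachDense-v2.zip",    # hypothetical
)
model = A2C.load(checkpoint)

env = gym.make("PandaReachDense-v2")
obs = env.reset()
action, _ = model.predict(obs, deterministic=True)  # obs is a dict (goal-conditioned env)
```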
BigSalmon/MrLincoln13
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- tags: - TimePilot-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: TimePilot-v5 type: TimePilot-v5 metrics: - type: mean_reward value: 10940.00 +/- 1704.23 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **TimePilot-v5** This is a trained model of a PPO agent playing TimePilot-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id TimePilot-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/TimePilot-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed10/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/TimePilot-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed10/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/TimePilot-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed10/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id TimePilot-v5 --seed 10 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'TimePilot-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 10, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
BigSalmon/MrLincoln14
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 582.50 +/- 218.59 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib Install the RL Zoo (with SB3 and SB3-Contrib): ```bash pip install rl_zoo3 ``` ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga artbreguez -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga artbreguez -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python -m rl_zoo3.train --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga artbreguez ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 100000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 100000), ('n_timesteps', 1000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ```
BigSalmon/MrLincoln3
[ "pytorch", "tensorboard", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
17
null
--- tags: - Qbert-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Qbert-v5 type: Qbert-v5 metrics: - type: mean_reward value: 15060.00 +/- 130.96 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Qbert-v5** This is a trained model of a PPO agent playing Qbert-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id Qbert-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Qbert-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed10/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Qbert-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed10/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Qbert-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed10/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Qbert-v5 --seed 10 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Qbert-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 10, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
BigSalmon/MrLincoln6
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- tags: - RoadRunner-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: RoadRunner-v5 type: RoadRunner-v5 metrics: - type: mean_reward value: 53360.00 +/- 7575.65 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **RoadRunner-v5** This is a trained model of a PPO agent playing RoadRunner-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id RoadRunner-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/RoadRunner-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed10/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/RoadRunner-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed10/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/RoadRunner-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed10/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id RoadRunner-v5 --seed 10 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'RoadRunner-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 10, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
BigSalmon/MrLincoln8
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- tags: - Enduro-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Enduro-v5 type: Enduro-v5 metrics: - type: mean_reward value: 0.00 +/- 0.00 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Enduro-v5** This is a trained model of a PPO agent playing Enduro-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --env-id Enduro-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Enduro-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed3/raw/main/cleanba_impala_envpool_impala_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Enduro-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed3/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Enduro-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed3/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_impala_atari_wrapper.py --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Enduro-v5 --seed 3 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Enduro-v5', 'exp_name': 'cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 3, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
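The config field in rows like BigSalmon/MrLincoln8 records per-task generation defaults under task_specific_params. A minimal sketch of reading them back (assumes the repo's weights and config are still available on the Hub):

```python
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("BigSalmon/MrLincoln8")
gen_defaults = (getattr(cfg, "task_specific_params", None) or {}).get("text-generation", {})
print(gen_defaults)  # per the config row above: {'do_sample': True, 'max_length': 50}
```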
BigSalmon/MrLincolnBerta
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- tags: - Enduro-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Enduro-v5 type: Enduro-v5 metrics: - type: mean_reward value: 0.00 +/- 0.00 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Enduro-v5** This is a trained model of a PPO agent playing Enduro-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --env-id Enduro-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Enduro-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed1/raw/main/cleanba_impala_envpool_impala_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Enduro-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed1/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Enduro-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed1/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_impala_atari_wrapper.py --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Enduro-v5 --seed 1 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Enduro-v5', 'exp_name': 'cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 1, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
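For the fill-mask rows such as BigSalmon/MrLincolnBerta, usage follows the standard pipeline; RoBERTa checkpoints use `<mask>` as the mask token. A sketch, assuming the checkpoint is downloadable:

```python
from transformers import pipeline

unmask = pipeline("fill-mask", model="BigSalmon/MrLincolnBerta")
# Each prediction carries the filled token and its score.
for pred in unmask("The senator delivered a <mask> speech."):
    print(pred["token_str"], round(pred["score"], 3))
```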
BigSalmon/Neo
[ "pytorch", "gpt_neo", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPTNeoForCausalLM" ], "model_type": "gpt_neo", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- license: creativeml-openrail-m --- The Anime extract tunes generations toward a flat-shaded, anime-like look. It is an extracted LoRA of a merge made by the Discord user DarkSide from [detailedproject](https://huggingface.co/closertodeath/detailedproject) and animelike 2.5D. A similar effort can be found [here](https://civitai.com/models/24330/a1-filter); compared to the A1Filter, this extract tunes to a flat style more effectively. This extract was made against aom2_nsfw, and the soft version was made against novel's model. CivitAI: https://civitai.com/models/24796
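A hedged sketch of applying such a LoRA extract with recent diffusers. The base checkpoint id and the LoRA filename are placeholders: the card's aom2_nsfw base has no canonical Hub id given here.

```python
from diffusers import StableDiffusionPipeline

# Placeholder base model and LoRA filename -- substitute the real ones.
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe.load_lora_weights(".", weight_name="anime_extract.safetensors")
image = pipe("1girl, flat shading, anime style").images[0]
image.save("sample.png")
```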
BigSalmon/ParaphraseParentheses
[ "pytorch", "tensorboard", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- tags: - FishingDerby-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FishingDerby-v5 type: FishingDerby-v5 metrics: - type: mean_reward value: 26.50 +/- 10.01 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **FishingDerby-v5** This is a trained model of a PPO agent playing FishingDerby-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --env-id FishingDerby-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/FishingDerby-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed2/raw/main/cleanba_impala_envpool_impala_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/FishingDerby-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed2/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/FishingDerby-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed2/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_impala_atari_wrapper.py --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id FishingDerby-v5 --seed 2 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'FishingDerby-v5', 'exp_name': 'cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 2, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
BigSalmon/ParaphraseParentheses2.0
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- tags: - FishingDerby-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FishingDerby-v5 type: FishingDerby-v5 metrics: - type: mean_reward value: 29.50 +/- 6.05 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **FishingDerby-v5** This is a trained model of a PPO agent playing FishingDerby-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --env-id FishingDerby-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/FishingDerby-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed3/raw/main/cleanba_impala_envpool_impala_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/FishingDerby-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed3/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/FishingDerby-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed3/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_impala_atari_wrapper.py --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id FishingDerby-v5 --seed 3 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'FishingDerby-v5', 'exp_name': 'cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 3, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
BigSalmon/PhraseBerta
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- tags: - FishingDerby-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FishingDerby-v5 type: FishingDerby-v5 metrics: - type: mean_reward value: 28.00 +/- 8.73 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **FishingDerby-v5** This is a trained model of a PPO agent playing FishingDerby-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --env-id FishingDerby-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/FishingDerby-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed1/raw/main/cleanba_impala_envpool_impala_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/FishingDerby-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed1/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/FishingDerby-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed1/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_impala_atari_wrapper.py --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id FishingDerby-v5 --seed 1 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'FishingDerby-v5', 'exp_name': 'cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 1, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
BigSalmon/Points
[ "pytorch", "tensorboard", "gpt2", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
2023-03-26T00:12:19Z
--- tags: - DoubleDunk-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: DoubleDunk-v5 type: DoubleDunk-v5 metrics: - type: mean_reward value: -10.80 +/- 3.92 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **DoubleDunk-v5** This is a trained model of a PPO agent playing DoubleDunk-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --env-id DoubleDunk-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/DoubleDunk-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed2/raw/main/cleanba_impala_envpool_impala_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/DoubleDunk-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed2/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/DoubleDunk-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed2/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_impala_atari_wrapper.py --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id DoubleDunk-v5 --seed 2 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'DoubleDunk-v5', 'exp_name': 'cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 2, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
BigSalmon/Robertsy
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
2023-03-26T00:12:54Z
--- tags: - DoubleDunk-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: DoubleDunk-v5 type: DoubleDunk-v5 metrics: - type: mean_reward value: -9.40 +/- 4.39 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **DoubleDunk-v5** This is a trained model of a PPO agent playing DoubleDunk-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --env-id DoubleDunk-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/DoubleDunk-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed1/raw/main/cleanba_impala_envpool_impala_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/DoubleDunk-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed1/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/DoubleDunk-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed1/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_impala_atari_wrapper.py --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id DoubleDunk-v5 --seed 1 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'DoubleDunk-v5', 'exp_name': 'cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 1, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
BigSalmon/Rowerta
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- tags: - Freeway-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Freeway-v5 type: Freeway-v5 metrics: - type: mean_reward value: 0.00 +/- 0.00 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Freeway-v5** This is a trained model of a PPO agent playing Freeway-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --env-id Freeway-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Freeway-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed1/raw/main/cleanba_impala_envpool_impala_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Freeway-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed1/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Freeway-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed1/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_impala_atari_wrapper.py --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Freeway-v5 --seed 1 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Freeway-v5', 'exp_name': 'cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 1, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
BigSalmon/T5F
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
6
2023-03-26T00:14:47Z
--- tags: - DoubleDunk-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: DoubleDunk-v5 type: DoubleDunk-v5 metrics: - type: mean_reward value: -5.80 +/- 5.02 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **DoubleDunk-v5** This is a trained model of a PPO agent playing DoubleDunk-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --env-id DoubleDunk-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/DoubleDunk-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed3/raw/main/cleanba_impala_envpool_impala_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/DoubleDunk-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed3/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/DoubleDunk-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed3/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_impala_atari_wrapper.py --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id DoubleDunk-v5 --seed 3 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'DoubleDunk-v5', 'exp_name': 'cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 3, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
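The T5 config in the BigSalmon/T5F row carries task prefixes in task_specific_params (e.g. "translate English to German: ") along with beam-search defaults. A sketch of applying one of them manually, assuming the checkpoint loads:

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tok = AutoTokenizer.from_pretrained("BigSalmon/T5F")
model = AutoModelForSeq2SeqLM.from_pretrained("BigSalmon/T5F")

# Prefix and generation settings taken from the config's translation_en_to_de entry.
inputs = tok("translate English to German: The house is small.", return_tensors="pt")
out = model.generate(**inputs, max_length=300, num_beams=4, early_stopping=True)
print(tok.decode(out[0], skip_special_tokens=True))
```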
BigSalmon/TS3
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible", "has_space" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- tags: - LunarLander-v2 - ppo - deep-reinforcement-learning - reinforcement-learning - custom-implementation - deep-rl-course model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: -179.38 +/- 79.59 name: mean_reward verified: false --- # PPO Agent Playing LunarLander-v2 This is a trained model of a PPO agent playing LunarLander-v2. # Hyperparameters ```python {'exp_name': 'ppo', 'seed': 1, 'torch_deterministic': True, 'cuda': True, 'track': False, 'wandb_project_name': 'cleanRL', 'wandb_entity': None, 'capture_video': False, 'env_id': 'LunarLander-v2', 'total_timesteps': 50000, 'learning_rate': 0.00025, 'num_envs': 4, 'num_steps': 128, 'anneal_lr': True, 'gae': True, 'gamma': 0.99, 'gae_lambda': 0.95, 'num_minibatches': 4, 'update_epochs': 4, 'norm_adv': True, 'clip_coef': 0.2, 'clip_vloss': True, 'ent_coef': 0.01, 'vf_coef': 0.5, 'max_grad_norm': 0.5, 'target_kl': None, 'repo_id': 'Max100ce/ppo-CartPole-v1', 'batch_size': 512, 'minibatch_size': 128} ```
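The `mean_reward value: X +/- Y` lines in these cards follow the Hub's usual convention of mean and standard deviation over a batch of evaluation episodes. A sketch of computing such a metric, with a random policy standing in for the trained one and 10 episodes assumed (requires `gymnasium[box2d]`):

```python
import gymnasium as gym
import numpy as np

env = gym.make("LunarLander-v2")
returns = []
for _ in range(10):  # episode count is an assumption, not stated by the card
    obs, _ = env.reset()
    done, ep_return = False, 0.0
    while not done:
        action = env.action_space.sample()  # stand-in for the trained PPO policy
        obs, reward, terminated, truncated, _ = env.step(action)
        ep_return += reward
        done = terminated or truncated
    returns.append(ep_return)
print(f"mean_reward={np.mean(returns):.2f} +/- {np.std(returns):.2f}")
```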
BigSalmon/prepositions
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2023-03-26T00:25:16Z
--- library_name: stable-baselines3 tags: - PandaReachDense-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: PandaReachDense-v2 type: PandaReachDense-v2 metrics: - type: mean_reward value: -0.24 +/- 0.06 name: mean_reward verified: false --- # **A2C** Agent playing **PandaReachDense-v2** This is a trained model of a **A2C** agent playing **PandaReachDense-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
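The usage block in this card (and in the identical A2C card a few rows down) is the generator's TODO placeholder, kept verbatim above. A hedged completion; the repo id and filename are hypothetical, since the cards do not state them, and the environment itself requires the panda-gym package:

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import A2C

# Hypothetical repo id and filename -- substitute the real ones.
path = load_from_hub(repo_id="user/a2c-PandaReachDense-v2",
                     filename="a2c-PandaReachDense-v2.zip")
model = A2C.load(path)  # PandaReachDense-v2 needs panda-gym installed
```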
BigTooth/DialoGPT-Megumin
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
16
2023-03-26T00:25:40Z
--- library_name: stable-baselines3 tags: - PandaReachDense-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: PandaReachDense-v2 type: PandaReachDense-v2 metrics: - type: mean_reward value: -1.08 +/- 0.49 name: mean_reward verified: false --- # **A2C** Agent playing **PandaReachDense-v2** This is a trained model of a **A2C** agent playing **PandaReachDense-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
BigTooth/DialoGPT-small-tohru
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
2023-03-26T00:28:36Z
--- tags: - generated_from_trainer model-index: - name: smashing-sexism-robert-weighted-final-2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # smashing-sexism-robert-weighted-final-2 This model is a fine-tuned version of [readerbench/RoBERT-base](https://huggingface.co/readerbench/RoBERT-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.6381 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 0.9887 | 0.1 | 400 | 0.9251 | | 0.9326 | 0.21 | 800 | 1.0643 | | 0.8767 | 0.31 | 1200 | 0.8270 | | 0.9989 | 0.41 | 1600 | 1.0447 | | 0.8717 | 0.51 | 2000 | 0.8382 | | 0.8298 | 0.62 | 2400 | 0.8867 | | 0.9462 | 0.72 | 2800 | 0.8950 | | 0.8885 | 0.82 | 3200 | 0.8633 | | 0.9317 | 0.92 | 3600 | 0.8930 | | 0.7629 | 1.03 | 4000 | 1.1367 | | 0.7152 | 1.13 | 4400 | 0.9594 | | 0.66 | 1.23 | 4800 | 0.9411 | | 0.6867 | 1.33 | 5200 | 1.1500 | | 0.6281 | 1.44 | 5600 | 0.9684 | | 0.6442 | 1.54 | 6000 | 1.1268 | | 0.6769 | 1.64 | 6400 | 0.9762 | | 0.7184 | 1.74 | 6800 | 0.8957 | | 0.58 | 1.85 | 7200 | 0.9875 | | 0.5751 | 1.95 | 7600 | 1.2363 | | 0.4031 | 2.05 | 8000 | 1.3173 | | 0.3862 | 2.15 | 8400 | 1.3331 | | 0.5009 | 2.26 | 8800 | 1.4265 | | 0.4591 | 2.36 | 9200 | 1.5329 | | 0.4284 | 2.46 | 9600 | 1.3033 | | 0.5236 | 2.56 | 10000 | 1.2444 | | 0.5135 | 2.67 | 10400 | 1.2472 | | 0.5369 | 2.77 | 10800 | 1.6505 | | 0.4701 | 2.87 | 11200 | 1.3840 | | 0.5371 | 2.97 | 11600 | 1.3600 | | 0.2557 | 3.08 | 12000 | 1.4148 | | 0.2952 | 3.18 | 12400 | 1.7975 | | 0.2098 | 3.28 | 12800 | 2.0480 | | 0.236 | 3.38 | 13200 | 1.9231 | | 0.2414 | 3.49 | 13600 | 1.6038 | | 0.387 | 3.59 | 14000 | 1.6627 | | 0.3059 | 3.69 | 14400 | 1.5931 | | 0.2872 | 3.79 | 14800 | 1.5828 | | 0.1751 | 3.9 | 15200 | 1.9071 | | 0.2429 | 4.0 | 15600 | 1.6990 | | 0.164 | 4.1 | 16000 | 1.9178 | | 0.0941 | 4.2 | 16400 | 2.1213 | | 0.1948 | 4.31 | 16800 | 2.0160 | | 0.1442 | 4.41 | 17200 | 2.0305 | | 0.2209 | 4.51 | 17600 | 1.9717 | | 0.1375 | 4.61 | 18000 | 2.0309 | | 0.1995 | 4.72 | 18400 | 2.0615 | | 0.1421 | 4.82 | 18800 | 2.0320 | | 0.2076 | 4.92 | 19200 | 1.9974 | | 0.0748 | 5.02 | 19600 | 1.9942 | | 0.0689 | 5.13 | 20000 | 2.1029 | | 0.0841 | 5.23 | 20400 | 2.2356 | | 0.0782 | 5.33 | 20800 | 2.2074 | | 0.1662 | 5.43 | 21200 | 2.3315 | | 0.0415 | 5.54 | 21600 | 2.5986 | | 0.0731 | 5.64 | 22000 | 2.2913 | | 0.0851 | 5.74 | 22400 | 2.4306 | | 0.0923 | 5.84 | 22800 | 2.4737 | | 0.099 | 5.95 | 23200 | 2.2077 | | 0.0297 | 6.05 | 23600 | 2.2406 | | 0.0365 | 6.15 | 24000 | 2.5536 | | 0.0131 | 6.25 | 24400 | 2.7311 | | 0.0838 | 6.36 | 24800 | 2.3021 | | 0.0392 | 6.46 | 25200 | 2.4769 | | 0.0357 | 6.56 | 25600 | 2.4404 | | 0.0955 | 6.66 | 26000 | 2.4813 | | 0.1119 | 6.77 | 26400 | 2.3819 | | 0.0916 | 6.87 | 26800 | 2.5341 | | 0.1437 | 6.97 | 27200 | 2.2940 | | 0.0333 | 7.08 | 27600 | 2.4652 | | 0.0276 | 7.18 | 28000 | 2.5684 | | 0.0306 | 7.28 | 28400 | 2.4722 | | 0.0248 | 7.38 | 28800 | 2.7375 | | 0.0199 | 7.49 | 29200 | 2.7708 | | 0.0443 | 7.59 | 29600 | 2.7067 | | 0.0119 | 7.69 | 30000 | 2.6394 | | 0.0606 | 7.79 | 30400 | 2.5045 | | 0.0467 | 7.9 | 30800 | 2.3479 | | 0.0438 | 8.0 | 31200 | 2.7489 | | 0.0033 | 8.1 | 31600 | 2.6423 | | 0.0306 | 8.2 | 32000 | 2.5070 | | 0.033 | 8.31 | 32400 | 2.7068 | | 0.0114 | 8.41 | 32800 | 2.7400 | | 0.0032 | 8.51 | 33200 | 2.5803 | | 0.0305 | 8.61 | 33600 | 2.8058 | | 0.0253 | 8.72 | 34000 | 2.5497 | | 0.0183 | 8.82 | 34400 | 2.5782 | | 0.0651 | 8.92 | 34800 | 2.7173 | | 0.0345 | 9.02 | 35200 | 2.5939 | | 0.0206 | 9.13 | 35600 | 2.6243 | | 0.0018 | 9.23 | 36000 | 2.5503 | | 0.0484 | 9.33 | 36400 | 2.7006 | | 0.0359 | 9.43 | 36800 | 2.6202 | | 0.006 | 9.54 | 37200 | 2.6260 | | 0.0205 | 9.64 | 37600 | 2.7143 | | 0.0153 | 9.74 | 38000 | 2.6923 | | 0.0342 | 9.84 | 38400 | 2.6475 | | 0.011 | 9.95 | 38800 | 2.6381 | ### Framework versions - Transformers 4.27.3 - Pytorch 2.0.0+cu117 - Datasets 2.10.1 - Tokenizers 0.13.2
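The training hyperparameters in the card above map directly onto `transformers.TrainingArguments`; a sketch of the equivalent setup (the output directory name echoes the card, and the model/dataset wiring is omitted):

```python
from transformers import TrainingArguments

# Adam betas (0.9, 0.999) and epsilon 1e-08 from the card are the TrainingArguments defaults.
args = TrainingArguments(
    output_dir="smashing-sexism-robert-weighted-final-2",
    learning_rate=2e-5,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=10,
)
```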
BigeS/DialoGPT-small-Rick
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
2023-03-26T00:30:14Z
--- tags: - Tutankham-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Tutankham-v5 type: Tutankham-v5 metrics: - type: mean_reward value: 245.30 +/- 16.30 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Tutankham-v5** This is a trained model of a PPO agent playing Tutankham-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --env-id Tutankham-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Tutankham-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed10/raw/main/cleanba_impala_envpool_machado_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Tutankham-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed10/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Tutankham-v5-cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4-seed10/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_machado_atari_wrapper.py --exp-name cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Tutankham-v5 --seed 10 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Tutankham-v5', 'exp_name': 'cleanba_impala_envpool_machado_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 10, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
BinksSachary/DialoGPT-small-shaxx
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- tags: - Gopher-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Gopher-v5 type: Gopher-v5 metrics: - type: mean_reward value: 1376.00 +/- 791.85 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Gopher-v5** This is a trained model of a PPO agent playing Gopher-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --env-id Gopher-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Gopher-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed2/raw/main/cleanba_impala_envpool_impala_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Gopher-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed2/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Gopher-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed2/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_impala_atari_wrapper.py --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Gopher-v5 --seed 2 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Gopher-v5', 'exp_name': 'cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 2, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
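The conversational rows in this stretch (DialoGPT variants) carry configs whose only non-null task parameter is `conversational.max_length: 1000`. A minimal single-turn chat sketch in the usual DialoGPT style, assuming the checkpoint still loads from the Hub:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("BinksSachary/DialoGPT-small-shaxx")
model = AutoModelForCausalLM.from_pretrained("BinksSachary/DialoGPT-small-shaxx")

# Append EOS to the user turn, then generate up to the config's max_length of 1000.
ids = tok.encode("Hello, how are you?" + tok.eos_token, return_tensors="pt")
reply_ids = model.generate(ids, max_length=1000, pad_token_id=tok.eos_token_id)
print(tok.decode(reply_ids[0, ids.shape[-1]:], skip_special_tokens=True))
```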
BinksSachary/ShaxxBot
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
2023-03-26T00:39:20Z
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### hftoken Dreambooth model trained by ukeeba with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) Sample pictures of this concept:
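A hedged sketch of sampling from a DreamBooth checkpoint like the one above with diffusers. The repo id and prompt token are guesses from the card, which names only the trainer (ukeeba) and the concept (hftoken):

```python
import torch
from diffusers import StableDiffusionPipeline

# Hypothetical repo id -- the card does not state where the weights live.
pipe = StableDiffusionPipeline.from_pretrained(
    "ukeeba/hftoken", torch_dtype=torch.float16
).to("cuda")
image = pipe("a photo of hftoken").images[0]
image.save("hftoken.png")
```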
BinksSachary/ShaxxBot2
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
2023-03-26T00:39:54Z
--- language: en thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1510084170164355076/7f6ijkJo_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Paint</div> <div style="text-align: center; font-size: 14px;">@roach_collector</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Paint. | Data | Paint | | --- | --- | | Tweets downloaded | 732 | | Retweets | 0 | | Short tweets | 105 | | Tweets kept | 627 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/ud9k8dh2/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @roach_collector's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/asbhe8tu) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/asbhe8tu/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/roach_collector') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
Blabla/Pipipopo
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - Gopher-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Gopher-v5 type: Gopher-v5 metrics: - type: mean_reward value: 12092.00 +/- 5138.32 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Gopher-v5** This is a trained model of a PPO agent playing Gopher-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --env-id Gopher-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Gopher-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed3/raw/main/cleanba_impala_envpool_impala_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Gopher-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed3/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Gopher-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed3/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_impala_atari_wrapper.py --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Gopher-v5 --seed 3 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Gopher-v5', 'exp_name': 'cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 3, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
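The hyperparameter values above are internally consistent: the global batch size is the per-process rollout size times the number of distributed processes, and the update count follows from the total timestep budget. A quick sanity check in plain Python — these relationships are inferred from the printed numbers, not taken from CleanRL's source:

```python
# Inferred relationships between the hyperparameters printed above.
local_num_envs = 30
num_steps = 20
world_size = 4               # number of distributed processes
num_minibatches = 2
total_timesteps = 50_000_000

local_batch_size = local_num_envs * num_steps               # 600
batch_size = local_batch_size * world_size                  # 2400
minibatch_size = batch_size // num_minibatches              # 1200
local_minibatch_size = local_batch_size // num_minibatches  # 300
num_envs = local_num_envs * world_size                      # 120
num_updates = total_timesteps // batch_size                 # 20833

print(batch_size, minibatch_size, num_envs, num_updates)
```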
Blaine-Mason/hackMIT-finetuned-sst2
[ "pytorch", "tensorboard", "bert", "text-classification", "dataset:glue", "transformers", "generated_from_trainer" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
36
null
--- tags: - Frostbite-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Frostbite-v5 type: Frostbite-v5 metrics: - type: mean_reward value: 300.00 +/- 26.08 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Frostbite-v5** This is a trained model of a PPO agent playing Frostbite-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --env-id Frostbite-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Frostbite-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed1/raw/main/cleanba_impala_envpool_impala_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Frostbite-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed1/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Frostbite-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed1/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_impala_atari_wrapper.py --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Frostbite-v5 --seed 1 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Frostbite-v5', 'exp_name': 'cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 1, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
Blerrrry/Kkk
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - Frostbite-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Frostbite-v5 type: Frostbite-v5 metrics: - type: mean_reward value: 310.00 +/- 0.00 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Frostbite-v5** This is a trained model of a PPO agent playing Frostbite-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --env-id Frostbite-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Frostbite-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed2/raw/main/cleanba_impala_envpool_impala_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Frostbite-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed2/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Frostbite-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed2/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_impala_atari_wrapper.py --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Frostbite-v5 --seed 2 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Frostbite-v5', 'exp_name': 'cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 2, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
BlightZz/DialoGPT-medium-Kurisu
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
19
null
--- tags: - Frostbite-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Frostbite-v5 type: Frostbite-v5 metrics: - type: mean_reward value: 309.00 +/- 21.66 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Frostbite-v5** This is a trained model of a PPO agent playing Frostbite-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --env-id Frostbite-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Frostbite-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed3/raw/main/cleanba_impala_envpool_impala_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Frostbite-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed3/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Frostbite-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed3/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_impala_atari_wrapper.py --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Frostbite-v5 --seed 3 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Frostbite-v5', 'exp_name': 'cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 3, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
BlightZz/MakiseKurisu
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
--- tags: - Freeway-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Freeway-v5 type: Freeway-v5 metrics: - type: mean_reward value: 0.00 +/- 0.00 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Freeway-v5** This is a trained model of a PPO agent playing Freeway-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --env-id Freeway-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Freeway-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed3/raw/main/cleanba_impala_envpool_impala_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Freeway-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed3/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Freeway-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed3/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_impala_atari_wrapper.py --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Freeway-v5 --seed 3 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Freeway-v5', 'exp_name': 'cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 3, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
BlueGamerBeast/DialoGPT-small-joshua
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - Gravitar-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Gravitar-v5 type: Gravitar-v5 metrics: - type: mean_reward value: 1875.00 +/- 845.95 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Gravitar-v5** This is a trained model of a PPO agent playing Gravitar-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --env-id Gravitar-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Gravitar-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed3/raw/main/cleanba_impala_envpool_impala_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/Gravitar-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed3/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Gravitar-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed3/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_impala_atari_wrapper.py --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Gravitar-v5 --seed 3 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Gravitar-v5', 'exp_name': 'cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 3, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```
BobBraico/bert-finetuned-ner
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: ml-agents tags: - Huggy - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Huggy --- # **ppo** Agent playing **Huggy** This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial that walks you through training your first agent with ML-Agents and publishing it to the Hub. ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` A filled-in example follows this card. ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy 2. Find your model_id: shermansiu/ppo-Huggy 3. Select your *.nn or *.onnx file 4. Click on Watch the agent play 👀
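As a concrete illustration of the resume command in the card above — the config path and run id here are hypothetical placeholders, not values recorded for this run:

```
mlagents-learn ./config/ppo/Huggy.yaml --run-id=Huggy --resume
```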
BobBraico/distilbert-base-uncased-finetuned-imdb
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - IceHockey-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: IceHockey-v5 type: IceHockey-v5 metrics: - type: mean_reward value: 5.20 +/- 3.71 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **IceHockey-v5** This is a trained model of a PPO agent playing IceHockey-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --env-id IceHockey-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/IceHockey-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed1/raw/main/cleanba_impala_envpool_impala_atari_wrapper.py curl -OL https://huggingface.co/cleanrl/IceHockey-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed1/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/IceHockey-v5-cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4-seed1/raw/main/poetry.lock poetry install --all-extras python cleanba_impala_envpool_impala_atari_wrapper.py --exp-name cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4 --distributed --learner-device-ids 1 --local-num-envs 30 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id IceHockey-v5 --seed 1 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 30, 'async_update': 1, 'batch_size': 2400, 'capture_video': False, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'IceHockey-v5', 'exp_name': 'cleanba_impala_envpool_impala_atari_wrapper_a0_l1_d4', 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3', 'gpu:5', 'gpu:7'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 600, 'local_minibatch_size': 300, 'local_num_envs': 30, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 1200, 'num_envs': 120, 'num_minibatches': 2, 'num_steps': 20, 'num_updates': 20833, 'profile': False, 'save_model': True, 'seed': 1, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 4} ```