Dataset schema (one record per model, fields in this order):
- modelId: string (length 4 to 81)
- tags: list
- pipeline_tag: string (17 classes)
- config: dict
- downloads: int64 (0 to 59.7M)
- first_commit: timestamp[ns, tz=UTC]
- card: string (length 51 to 438k)
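Each record below follows this schema. A minimal sketch for loading and inspecting such a dump with the `datasets` library (the dataset identifier is a placeholder, not taken from this dump):

```python
from datasets import load_dataset

# Placeholder repo id; substitute the dataset this dump was exported from
ds = load_dataset("someuser/model-cards-with-metadata", split="train")

row = ds[0]
print(row["modelId"], row["pipeline_tag"], row["downloads"])
print(row["card"][:200])  # first 200 characters of the model card text
```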
Denilson/gbert-base-germaner
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - Pixelcopter-PLE-v0 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-Pixelcopter-PLE-v0 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Pixelcopter-PLE-v0 type: Pixelcopter-PLE-v0 metrics: - type: mean_reward value: 19.40 +/- 11.67 name: mean_reward verified: false --- # **Reinforce** Agent playing **Pixelcopter-PLE-v0** This is a trained model of a **Reinforce** agent playing **Pixelcopter-PLE-v0**. To learn to use this model and train your own, check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
Deniskin/emailer_medium_300
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
--- tags: - fastai --- # Amazing! 🥳 Congratulations on hosting your fastai model on the Hugging Face Hub! # Some next steps 1. Fill out this model card with more information (see the template below and the [documentation here](https://huggingface.co/docs/hub/model-repos))! 2. Create a demo in Gradio or Streamlit using 🤗 Spaces ([documentation here](https://huggingface.co/docs/hub/spaces)). 3. Join the fastai community on the [Fastai Discord](https://discord.com/invite/YKrxeNn)! Greetings fellow fastlearner 🤝! Don't forget to delete this content from your model card. --- # Model card ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed
Deniskin/essays_small_2000i
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-CartPole-v1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1**. To learn to use this model and train your own, check Unit 5 of the Deep Reinforcement Learning Class: https://github.com/huggingface/deep-rl-class/tree/main/unit5
Deniskin/gpt3_medium
[ "pytorch", "gpt2", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
52
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imdb model-index: - name: pythia-70M results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # pythia-70M This model is a fine-tuned version of [EleutherAI/pythia-70M](https://huggingface.co/EleutherAI/pythia-70M) on the imdb dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1.0 ### Training results ### Framework versions - Transformers 4.27.0.dev0 - Pytorch 1.13.1+cu117 - Datasets 2.9.0 - Tokenizers 0.13.2
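The hyperparameters reported in the card above map onto a `transformers` `TrainingArguments` configuration roughly like this sketch (single-device training assumed; the output directory is a placeholder):

```python
from transformers import TrainingArguments

# Mirrors the reported hyperparameters; Trainer's defaults already give
# Adam with betas=(0.9, 0.999) and epsilon=1e-8.
args = TrainingArguments(
    output_dir="pythia-70M-imdb",   # placeholder
    learning_rate=5e-5,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=1.0,
)
```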
Denver/distilbert-base-uncased-finetuned-squad
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - LunarLander-v2 - ppo - deep-reinforcement-learning - reinforcement-learning - custom-implementation - deep-rl-course model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: -181.88 +/- 167.18 name: mean_reward verified: false --- # PPO Agent Playing LunarLander-v2 This is a trained model of a PPO agent playing LunarLander-v2. # Hyperparameters ```python {'exp_name': 'ppo' 'seed': 1 'torch_deterministic': True 'cuda': True 'track': False 'wandb_project_name': 'cleanRL' 'wandb_entity': None 'capture_video': False 'env_id': 'LunarLander-v2' 'total_timesteps': 50000 'learning_rate': 0.0003 'num_envs': 4 'num_steps': 64 'anneal_lr': True 'gae': True 'gamma': 0.98 'gae_lambda': 0.95 'num_minibatches': 8 'update_epochs': 4 'norm_adv': True 'clip_coef': 0.2 'clip_vloss': True 'ent_coef': 0.01 'vf_coef': 0.5 'max_grad_norm': 0.5 'target_kl': None 'repo_id': 'frangiral/ppo-LunarLander-v2-1' 'batch_size': 256 'minibatch_size': 32} ```
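The derived values at the bottom of that hyperparameter dump follow from the ones above it; a quick check:

```python
num_envs, num_steps, num_minibatches = 4, 64, 8
batch_size = num_envs * num_steps               # 256, matches 'batch_size'
minibatch_size = batch_size // num_minibatches  # 32, matches 'minibatch_size'
print(batch_size, minibatch_size)
```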
DeskDown/MarianMixFT_en-fil
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Model Card for my first Diffusion Model, which performs unconditional image generation of beautiful butterflies. This model is trained on a dataset of butterflies at image size 32x32. ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('NvkAnirudh/sd-butterflies32') image = pipeline().images[0] image ```
DeskDown/MarianMixFT_en-hi
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imdb model-index: - name: pythia-160M results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # pythia-160M This model is a fine-tuned version of [EleutherAI/pythia-160M](https://huggingface.co/EleutherAI/pythia-160M) on the imdb dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1.0 ### Training results ### Framework versions - Transformers 4.27.0.dev0 - Pytorch 1.13.1+cu117 - Datasets 2.9.0 - Tokenizers 0.13.2
DeskDown/MarianMixFT_en-ms
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: ksathur/bert-finetuned-squad results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # ksathur/bert-finetuned-squad This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.0871 - Epoch: 4 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 1310, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: mixed_float16 ### Training results | Train Loss | Epoch | |:----------:|:-----:| | 0.1770 | 0 | | 0.1526 | 1 | | 0.1185 | 2 | | 0.1124 | 3 | | 0.0871 | 4 | ### Framework versions - Transformers 4.26.1 - TensorFlow 2.11.0 - Datasets 2.10.0 - Tokenizers 0.13.2
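The AdamWeightDecay optimizer with the polynomial-decay schedule described above can be recreated with the `create_optimizer` helper from `transformers`; a sketch, assuming no warmup steps (warmup is not reported in the card):

```python
from transformers import create_optimizer

# 2e-5 decaying (power=1.0 polynomial, i.e. linearly) to 0 over 1310 steps,
# with a 0.01 weight-decay rate, matching the reported config.
optimizer, lr_schedule = create_optimizer(
    init_lr=2e-5,
    num_train_steps=1310,
    num_warmup_steps=0,   # assumption
    weight_decay_rate=0.01,
)
```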
DeskDown/MarianMixFT_en-my
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2023-02-22T10:54:28Z
--- license: openrail tags: - stable-diffusion - lora --- ## Duelyst Codex Landscape LORA (Model Card will be updated later with proper example images and prompt settings, WIP) <img src="https://huggingface.co/cadaeic/duelyst-codex-lora/resolve/main/im_20230222110424_000_988913356.png"/> **Prompt:** painting of a fantasy landscape of a castle --- LORA trained on open source fantasy landscape art from the shut down game [Duelyst](https://en.wikipedia.org/wiki/Duelyst), whose assets have been uploaded to GitHub under the CC0 license. Dataset obtained from [The Public Conscious](http://library.lowframerate.games/), a collection of public domain images. ### Notes - Trained over Stable Diffusion 1.5 and will work with SD 1.x models - Overtrained on fantasy landscapes - My very first Lora, and pretty much a test Lora! - Trained with [this Colab notebook](https://colab.research.google.com/github/Linaqruf/kohya-trainer/blob/main/kohya-LoRA-finetuner.ipynb) using default settings with a free Google colab account for 40 minutes --- <img src="https://huggingface.co/cadaeic/duelyst-codex-lora/resolve/main/im_20230222110707_001_141397401.png"/> **Prompt:** fantasy painting of a fat grey british shorthair cat
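A minimal sketch for applying such a LoRA on top of an SD 1.x base with `diffusers` (the repo id comes from the image URLs above; that the weight file is in a format `load_lora_weights` understands is an assumption — check the repo's file list):

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Assumes a diffusers-compatible LoRA weight file in the repo
pipe.load_lora_weights("cadaeic/duelyst-codex-lora")

image = pipe("painting of a fantasy landscape of a castle").images[0]
image.save("castle.png")
```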
DeskDown/MarianMixFT_en-th
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### first_avartar Dreambooth model trained by zhoubinjason with [buildspace's DreamBooth](https://colab.research.google.com/github/buildspace/diffusers/blob/main/examples/dreambooth/DreamBooth_Stable_Diffusion.ipynb) notebook Build your own using the [AI Avatar project](https://buildspace.so/builds/ai-avatar)! To get started head over to the [project dashboard](https://buildspace.so/p/build-ai-avatars). Sample pictures of this concept: ![0](https://huggingface.co/zhoubinjason/first-avartar/resolve/main/sample_images/FpaNJRSXsAEkKv1-removebg-preview.png) ![1](https://huggingface.co/zhoubinjason/first-avartar/resolve/main/sample_images/FpaNJRSXsAEkKv1.png)
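The card shows sample images but no inference code; a minimal sketch, assuming the repo is a standard diffusers-format DreamBooth checkpoint (the prompt token is a guess, not stated in the card):

```python
import torch
from diffusers import StableDiffusionPipeline

# Repo id taken from the sample-image URLs above
pipe = StableDiffusionPipeline.from_pretrained(
    "zhoubinjason/first-avartar", torch_dtype=torch.float16
).to("cuda")

image = pipe("a portrait photo of first_avartar person").images[0]  # token is an assumption
image.save("avatar.png")
```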
DeskDown/MarianMixFT_en-vi
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: openrail --- ![](https://i.imgur.com/lFDU26M.png) ``` Model : Microworld v3~4 Base : Seek.art (Mega v1.0), OpenJourney v1.5 Type : Dreambooth SD Training Epochs : 5000 Encoder : 350 Images : 50~ Captions: Yes Trigger : microworld Credits : Barret, Coreco, PublicPrompts, PromptHero ``` ![](https://i.imgur.com/uSafxVZ.png) ``` Model : mw4_critters Base : mw4 Type : SD Merge Weight : 0.5 Trigger : microworld, food_crit Credits : Barret, Plasm0 ``` ![](https://i.imgur.com/lwwpwyB.png) ``` Model : mw4_knollingcase Base : mw4 Type : SD Merge Weight : 0.5 Trigger : microworld, knollingcase Credits : Barret, Aybeeceedee ``` ![](https://i.imgur.com/mfje4x5.png) ``` Model : mw4_steampunk Base : mw4 Type : SD Merge Weight : 0.5 Trigger : microworld, steampunkai Credits : Barret, KoningWouter ``` ![](https://i.imgur.com/3FxtLiJ.png) ![](https://i.imgur.com/tUbtbYv.png) ``` Model : mw4_characters Base : mw4 Type : SD Merge Weight : 0.5 Trigger : microworld, cbzbb Credits : Barret, RichVIP ``` ![](https://i.imgur.com/Nc337bX.png) ``` Model : mw4_clay Base : mw4 Type : SD Merge Weight : 0.5 Trigger : microworld, clayitization Credits : Barret, Plasm0 ``` ![](https://i.imgur.com/7b1qQ4r.png) ![](https://i.imgur.com/Arzvi5N.png) ``` Model : mw4_shatter Base : mw4 Type : SD Merge Weight : 0.5 Trigger : microworld, mdjrny-shttr Credits : Barret, ShadoWxShinigamI ``` ![](https://i.imgur.com/mLG5ckI.png) ``` Model : mw4_disney Base : mw4 Type : SD Merge Weight : 0.5 Trigger : microworld, modern disney style Credits : Barret, nitrosocke ``` ![](https://cdn.discordapp.com/attachments/1020076662269939822/1077983620268036186/grid-0015-2286867846e0291da2d17a54c8a5c9089945edf2873262682a-1.png) ``` Model : mw4_inkpunk Base : mw4 Type : SD Merge Weight : 0.5 Trigger : microworld, nvinkpunk Credits : Barret, Envvi Ai ```
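These cards describe 0.5-weight SD merges; as a rough illustration, a weighted checkpoint merge is a per-tensor average of two state dicts (a sketch only — file names are placeholders, and the actual merges were presumably produced with a WebUI merge tool):

```python
import torch

# Load two Stable Diffusion checkpoints (placeholder file names)
a = torch.load("mw4.ckpt", map_location="cpu")["state_dict"]
b = torch.load("knollingcase.ckpt", map_location="cpu")["state_dict"]

# Weight 0.5, matching the "Weight : 0.5" entries above
merged = {k: 0.5 * a[k] + 0.5 * b[k] for k in a.keys() & b.keys()}
torch.save({"state_dict": merged}, "mw4_knollingcase.ckpt")
```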
DeskDown/MarianMix_en-ja-10
[ "pytorch", "tensorboard", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 256.05 +/- 15.60 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal sketch (the repo id and filename are placeholders; check the repo's file list): ```python from stable_baselines3 import PPO from huggingface_sb3 import load_from_hub checkpoint = load_from_hub(repo_id="<user>/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip") model = PPO.load(checkpoint) ```
DeskDown/MarianMix_en-zh-10
[ "pytorch", "tensorboard", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: other tags: - generated_from_trainer datasets: - imdb model-index: - name: opt-125m results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # opt-125m This model is a fine-tuned version of [facebook/opt-125m](https://huggingface.co/facebook/opt-125m) on the imdb dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1.0 ### Training results ### Framework versions - Transformers 4.27.0.dev0 - Pytorch 1.13.1+cu117 - Datasets 2.9.0 - Tokenizers 0.13.2
DeskDown/MarianMix_en-zh_to_vi-ms-hi-ja
[ "pytorch", "tensorboard", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: cc-by-4.0 tags: - generated_from_trainer model-index: - name: roberta-finetuned-subjqa-movies_2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-finetuned-subjqa-movies_2 This model is a fine-tuned version of [deepset/roberta-base-squad2](https://huggingface.co/deepset/roberta-base-squad2) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.1+cu116 - Datasets 2.9.0 - Tokenizers 0.13.2
Despin89/test
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - FrozenLake-v1-4x4 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-Slippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4 type: FrozenLake-v1-4x4 metrics: - type: mean_reward value: 0.71 +/- 0.45 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python model = load_from_hub(repo_id="AigizK/q-FrozenLake-v1-4x4-Slippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
Devid/DialoGPT-small-Miku
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Pyramids library_name: ml-agents --- # **ppo** Agent playing **Pyramids** This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Pyramids 2. Write your model_id: G-e-o-r-g-e/ppo-PyramidsTraining 3. Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
Devrim/prism-default
[ "license:mit" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 552.50 +/- 174.46 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib Install the RL Zoo (with SB3 and SB3-Contrib): ```bash pip install rl_zoo3 ``` ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga sheryliza -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga sheryliza -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python -m rl_zoo3.train --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga sheryliza ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 100000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 100000), ('n_timesteps', 1000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ```
DevsIA/imagenes
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-02-22T11:13:43Z
--- tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 523.50 +/- 219.93 name: mean_reward verified: false --- # (CleanRL) **DQN** Agent Playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a DQN agent playing SpaceInvadersNoFrameskip-v4. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/dqn_atari.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[dqn_atari]" python -m cleanrl_utils.enjoy --exp-name dqn_atari --env-id SpaceInvadersNoFrameskip-v4 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/ssw1591/SpaceInvadersNoFrameskip-v4/raw/main/dqn_atari.py curl -OL https://huggingface.co/ssw1591/SpaceInvadersNoFrameskip-v4/raw/main/pyproject.toml curl -OL https://huggingface.co/ssw1591/SpaceInvadersNoFrameskip-v4/raw/main/poetry.lock poetry install --all-extras python dqn_atari.py --env-id SpaceInvadersNoFrameskip-v4 --total-timesteps 1000000 --capture-video --save-model --cuda --upload-model --hf-entity ssw1591 --seed 2 ``` # Hyperparameters ```python {'batch_size': 32, 'buffer_size': 1000000, 'capture_video': True, 'cuda': True, 'end_e': 0.01, 'env_id': 'SpaceInvadersNoFrameskip-v4', 'exp_name': 'dqn_atari', 'exploration_fraction': 0.1, 'gamma': 0.99, 'hf_entity': 'ssw1591', 'learning_rate': 0.0001, 'learning_starts': 80000, 'save_model': True, 'seed': 2, 'start_e': 1, 'target_network_frequency': 1000, 'tau': 1.0, 'torch_deterministic': True, 'total_timesteps': 1000000, 'track': False, 'train_frequency': 4, 'upload_model': True, 'wandb_entity': None, 'wandb_project_name': 'cleanRL'} ```
DewiBrynJones/wav2vec2-large-xlsr-welsh
[ "cy", "dataset:common_voice", "audio", "automatic-speech-recognition", "speech", "xlsr-fine-tuning-week", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imdb model-index: - name: distilbert-base-uncased-finetuned-imdb results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-imdb This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 2.4721 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 2.7086 | 1.0 | 157 | 2.4898 | | 2.5796 | 2.0 | 314 | 2.4230 | | 2.5269 | 3.0 | 471 | 2.4354 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.1+cu116 - Datasets 2.9.0 - Tokenizers 0.13.2
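For a masked-language-modeling card like this one, the evaluation loss converts to perplexity as exp(loss); a quick check:

```python
import math

eval_loss = 2.4721           # reported in the card above
perplexity = math.exp(eval_loss)
print(round(perplexity, 2))  # ~= 11.85
```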
Dhruva/Interstellar
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: sklearn tags: - sklearn - skops - tabular-classification - scikit-learn-intelex model_format: pickle model_file: model-optimized.pkl widget: structuredData: x0: - -2.91869894329896 - 1.2861311367611363 - -0.17676780746347887 x1: - 4.378017491251676 - -3.150744413325807 - -3.268596999474564 x10: - 0.8047174523586147 - -1.2969581776011339 - 0.31762144434782824 x11: - -3.2390344195350487 - 1.1959946004673214 - 0.7563114595834011 x12: - -0.5746008540164298 - 3.3486745844798804 - 2.948767259442758 x13: - 3.5915430703361673 - 3.5265729573580655 - 0.015963649217312414 x14: - 0.8137258237766396 - -3.6846723813183138 - -0.0726270826536789 x2: - 1.660608706929798 - -2.915945269906298 - -0.2966018717870358 x3: - 5.717843051082721 - 0.6565179693281937 - 3.2575477536637036 x4: - -1.319254071176227 - 3.2948523559028287 - 1.5435232801320602 x5: - -2.9780546324477646 - -1.3618488406102902 - 1.5867699986090962 x6: - 1.6471024314152358 - 1.3658191827488237 - -1.4529064414158699 x7: - 0.3525021389263907 - 1.3302365122960365 - 0.09438131139298833 x8: - -0.44858117376143913 - 0.9049016837557153 - 2.195212749960551 x9: - 0.5394501179095126 - -2.85779169799503 - 1.3981326527555564 --- # Model description [More Information Needed] ## Intended uses & limitations [More Information Needed] ## Training Procedure ### Hyperparameters The model is trained with the hyperparameters below. <details> <summary> Click to expand </summary> | Hyperparameter | Value | |------------------|-----------| | algorithm | auto | | leaf_size | 30 | | metric | minkowski | | metric_params | | | n_jobs | | | n_neighbors | 3 | | p | 2 | | weights | uniform | </details> ### Model Plot The model plot is below. <pre>KNeighborsClassifier(n_neighbors=3)</pre> ## Evaluation Results [More Information Needed] # How to Get Started with the Model [More Information Needed] # Model Card Authors This model card is written by the following authors: [More Information Needed] # Model Card Contact You can contact the model card authors through the following channels: [More Information Needed] # Citation Below you can find information related to citation. **BibTeX:** ``` [More Information Needed] ```
Digakive/Hsgshs
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: sample-factory tags: - deep-reinforcement-learning - reinforcement-learning - sample-factory model-index: - name: APPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: doom_health_gathering_supreme type: doom_health_gathering_supreme metrics: - type: mean_reward value: 8.82 +/- 3.19 name: mean_reward verified: false --- A(n) **APPO** model trained on the **doom_health_gathering_supreme** environment. This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory. Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/ ## Downloading the model After installing Sample-Factory, download the model with: ``` python -m sample_factory.huggingface.load_from_hub -r frangiral/rl_course_vizdoom_health_gathering_supreme ``` ## Using the model To run the model after download, use the `enjoy` script corresponding to this environment: ``` python -m <path.to.enjoy.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme ``` You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag. See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details ## Training with this model To continue training with this model, use the `train` script corresponding to this environment: ``` python -m <path.to.train.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000 ``` Note, you may have to adjust `--train_for_env_steps` to a suitably high number as the experiment will resume at the number of steps it concluded at.
Dilmk2/DialoGPT-small-harrypotter
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- license: mit tags: - generated_from_trainer datasets: - xtreme metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-de results: - task: name: Token Classification type: token-classification dataset: name: xtreme type: xtreme config: PAN-X.de split: validation args: PAN-X.de metrics: - name: F1 type: f1 value: 0.8616803278688524 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-de This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset. It achieves the following results on the evaluation set: - Loss: 0.1377 - F1: 0.8617 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.2577 | 1.0 | 525 | 0.1668 | 0.8145 | | 0.1285 | 2.0 | 1050 | 0.1376 | 0.8495 | | 0.0812 | 3.0 | 1575 | 0.1377 | 0.8617 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.1+cu116 - Datasets 2.9.0 - Tokenizers 0.13.2
DimaOrekhov/transformer-method-name
[ "pytorch", "encoder-decoder", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - f1 - accuracy model-index: - name: final_model_category results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # final_model_category This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0769 - F1: 0.9648 - Roc Auc: 0.9727 - Accuracy: 0.9129 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 6 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | Roc Auc | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:------:|:-------:|:--------:| | 0.1418 | 1.0 | 3445 | 0.1373 | 0.9339 | 0.9495 | 0.8409 | | 0.124 | 2.0 | 6890 | 0.1221 | 0.9411 | 0.9534 | 0.8571 | | 0.1201 | 3.0 | 10335 | 0.1123 | 0.9466 | 0.9592 | 0.8705 | | 0.0918 | 4.0 | 13780 | 0.0933 | 0.9547 | 0.9640 | 0.8891 | | 0.0779 | 5.0 | 17225 | 0.0804 | 0.9635 | 0.9712 | 0.9129 | | 0.0694 | 6.0 | 20670 | 0.0769 | 0.9648 | 0.9727 | 0.9129 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
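The card reports F1, ROC AUC, and accuracy side by side, the usual multi-label evaluation recipe; a sketch with scikit-learn (the micro averaging and the 0.5 threshold are assumptions, not stated in the card):

```python
import numpy as np
from sklearn.metrics import f1_score, roc_auc_score, accuracy_score

# Toy multi-label example: 3 samples, 4 labels
y_true = np.array([[1, 0, 1, 0], [0, 1, 0, 0], [1, 1, 0, 1]])
y_prob = np.array([[0.9, 0.2, 0.8, 0.1], [0.3, 0.7, 0.2, 0.4], [0.8, 0.6, 0.1, 0.9]])
y_pred = (y_prob >= 0.5).astype(int)  # threshold is an assumption

print("F1:", f1_score(y_true, y_pred, average="micro"))
print("ROC AUC:", roc_auc_score(y_true, y_prob, average="micro"))
print("Accuracy:", accuracy_score(y_true, y_pred))  # exact-match (subset) accuracy
```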
Dizoid/Lll
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-SnowballTarget library_name: ml-agents --- # **ppo** Agent playing **SnowballTarget** This is a trained model of a **ppo** agent playing **SnowballTarget** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-SnowballTarget 2. Write your model_id: sheryliza/ppo-SnowballTarget 3. Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
Doohae/q_encoder
[ "pytorch" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- tags: - generated_from_trainer datasets: - city_learn model-index: - name: output results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # output This model is a fine-tuned version of [](https://huggingface.co/) on the city_learn dataset. ## Model description state_mean = np.array( [6.51944444e+00, 3.98379630e+00, 1.25000000e+01, 1.67850000e+01, 1.67849190e+01, 1.67851968e+01, 1.67854977e+01, 7.28990741e+01, 7.29056713e+01, 7.29093750e+01, 7.29134259e+01, 2.07319097e+02, 2.07319097e+02, 2.07185417e+02, 2.07236111e+02, 2.01118634e+02, 2.01118634e+02, 2.00806481e+02, 2.00887616e+02, 1.56366486e-01, 1.05916886e+00, 6.96371636e-01, 2.91179937e-01, 3.99157702e-01, 2.73105321e-01, 2.73105321e-01, 2.73105321e-01, 2.73105321e-01]) state_std = np.array( [3.47125753e+00, 2.00155513e+00, 6.92218755e+00, 3.55389420e+00, 3.55381195e+00, 3.55403913e+00, 3.55461251e+00, 1.65420140e+01, 1.65465337e+01, 1.65478974e+01, 1.65489647e+01, 2.91883900e+02, 2.91883900e+02, 2.91755278e+02, 2.91833913e+02, 2.96415007e+02, 2.96415007e+02, 2.96260649e+02, 2.96305327e+02, 3.53750260e-02, 8.83521126e-01, 1.01549677e+00, 3.23319869e-01, 9.20646312e-01, 1.17879328e-01, 1.17879328e-01, 1.17879328e-01, 1.17879328e-01]) ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 64 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 360 ### Training results ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.1+cu116 - Datasets 2.9.0 - Tokenizers 0.13.2
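The state_mean/state_std arrays in that card are per-feature normalization statistics; a sketch of how a raw observation would be standardized before being fed to the model (the epsilon guard is an assumption for safety, not from the card):

```python
import numpy as np

# state_mean / state_std are the 28-element arrays listed in the card above
def normalize_state(state, state_mean, state_std, eps=1e-8):
    """Standardize a raw CityLearn observation feature-wise."""
    return (np.asarray(state) - state_mean) / (state_std + eps)
```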
DoyyingFace/bert-asian-hate-tweets-asian-unclean-with-clean-valid
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
33
null
---
license: wtfpl
---
Lora backup, in case Civitai returns 502/503 or worse one day.
---
Lora备份.以防哪天civitai什么的炸了。
==== Characters 角色 ====
Abigail Swimsuit 阿比盖尔(水着)
Ishtar 伊什塔尔
Colossus Omega Chan 高达酱
Angela Balzac 安洁拉·巴尔扎克
Ayachi Nene 绫地宁宁
Minato Aqua Nurse 凑阿库娅(护士)
Tyrca Corrupted 狄璐卡(恶堕)
Yui 优衣(礼服)
Haneoka Meimi 羽丘芽美(怪盗圣少女)
Azami (Kagerou Project) 蓟(阳炎Project)
Mikoshi Chiyo (Genshin Impact) 御舆千代(原神)
Sustainer of Heavenly Principles (Genshin Impact) 天理的维系者(原神)
Streetward Rambler/Young Madame Ping (Genshin Impact) 歌尘浪市真君/年轻的萍姥姥(原神)
Sandrone (Genshin Impact) “木偶”桑多涅(原神)
Dunyarzad (Genshin Impact) 迪娜泽黛(原神)
Hu Tao (Lawson) (Genshin Impact) 胡桃 罗森联动 (原神)
Yoimiya (Lawson) (Genshin Impact) 宵宫 罗森联动 (原神)
Shiraori/Kumoko (Human Form) 白织/蜘蛛子(人形态)
Sophia Keren 索菲亚/吸血子
Shichimiya Satone (Chuunibyou Koi) 七宫智音(中二恋)
DSR-50 Highest Bid & Red Peony (Girls' Frontline) DSR-50 最高出价&红牡丹
Akiyama Mizuki (Project Sekai) 晓山瑞希
Platinum Shimmering Dew (Arknights) 白金 灿阳朝露 (明日方舟)
Shadow Futaba (Persona 5) 暗影双叶(P5)
Hatsune (Summer) (Princess Connect!) 初音(夏日) 公主连结
绀紫之心 礼服 (Formal Dress)
绀紫之心 混沌形态/恶堕 (Chaos Form/Corrupted)
绀紫妹妹 混沌形态/恶堕 (Little Sister, Chaos Form/Corrupted)
======== Azur Lane 碧蓝航线 ========
Terror Halloween 恐怖(万圣节)
Hiei Nightgown 比叡(睡裙)
Voroshilov Hot Spring 伏罗希洛夫(温泉)
Deutschland Swimsuit 德意志(泳装)
Spee Casual + Formal Dress 斯佩(日常+礼服)
Musashi Violet Moonshadow 武藏(堇色月影)
Tashkent Day Off 塔什干(休息日)
Swiftsure Swimsuit 确捷(泳装)
Friedrich der Grosse Wedding Dress 腓特烈大帝(婚纱)
Yuudachi Christmas 夕立(圣诞节)
Chen Hai Cerulean Ripples 镇海(潋滟水色)
Neptune Princess of the Reindeers 海王星(圣诞节)
Dido Doll 黛朵(礼服)
Drake Swimsuit 德雷克(泳装)
Brest 布雷斯特
Veneto Swimsuit 维托里奥·维内托(泳装)
Amagi Swimsuit 天城(泳装)
Chapayev Prisoner 恰巴耶夫(囚服)
Formidable Swimsuit 可畏(泳装)
Hwah Jah Jiangshi/Zombie 华甲(僵尸)
La Galissonnière Black Cat 拉加利索尼埃(黑猫)
Manchester Succubus Nurse 曼彻斯特(魅魔护士)
Nagato Babydoll 长门(睡衣)
Perseus 英仙座
Shimakaze Alice 岛风(爱丽丝)
Tashkent Bound 塔什干(受缚)
Albion 阿尔比恩
Albion Succubus 阿尔比恩(魅魔)
Janus Succubus 雅努斯(魅魔)
Kashino Swimsuit 樫野(泳装)
Yet-san Ruqun 逸仙(襦裙)
Massachusetts Cocktail Dress 马萨诸塞(礼服)
======== Concepts/Costumes 概念/服装 ========
Indefatigable Maid Cosplay 不挠女仆装Cos
Ayachi Nene Cosplay 绫地宁宁Cos
Erotic Jiangshi Costume 涩情僵尸装
Swimsuit Pull 扯泳衣
Azure Horizons China Dress 天狼星碧波青云奶盖旗袍
Le Malin's Bunnysuit 恶毒兔女郎cos
Seyana Meme
Naked Hoodie 裸体卫衣
Naked Bathrobe 裸体浴袍
Riding Machine 骑马机
Mouth Holding Bikini 嘴叼比基尼
Bruce Lee's Jumpsuit 李小龙外套
Marilyn Monroe Dress Tug Cosplay 梦露压裙子Cosplay
Human Dog 犬化调教
Naked Bandage 果体绷带
Dangerous Beast 危险野兽
Plant Girl 植物娘
Barcode on Breasts 胸部条形码
Soap Bubble Censor 泡沫遮胸
Carrot Insertion 种萝卜
Delivery 躺在箱里
Mechanical Restrained 机械四肢拘束
Hair Censor 头发遮胸
Hair Over Crotch 头发遮三点
Girl In Cocktail Glass 坐在鸡尾酒杯里
Flower on Pussy 花朵遮阴
Straitjacket 拘束衣
Through Screen 钻出屏幕
Crotch Plate 遮阴板
Crucifixion 十字架
albert-base-v1
[ "pytorch", "tf", "safetensors", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "exbert", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
38,156
2023-02-22T13:05:54Z
---
license: gpl-3.0
language:
- en
pipeline_tag: text2text-generation
tags:
- code
- asr
- inverse text normalization
datasets:
- pavanBuduguppa/asr_inverse_text_normalization
---

# asr_inverse_text_normalization

Fine-tuned a facebook/bart-base pretrained model on the ASR inverse text normalization dataset by treating it as a seq2seq task. Other approaches that may be considered are treating it as a TokenClassification task, or the one described here: https://machinelearning.apple.com/research/inverse-text-normal.

## Model description

BART (Bidirectional and Auto-Regressive Transformers) is a pre-trained transformer-based neural network model developed by Facebook AI Research (FAIR) for various natural language processing (NLP) tasks. The BART architecture is based on the Transformer model, which is a type of neural network architecture that processes sequential input data, such as text, by applying self-attention mechanisms to capture the relationships between different words in the input sequence. BART includes both auto-regressive and bidirectional encoder-decoder transformer architectures, which enable it to perform both generation and prediction tasks. BART was trained on a diverse range of NLP tasks, including machine translation, summarization, and question answering, and has shown strong performance across multiple benchmarks. Its training process involves corrupting text with different types of noise and training the model to reconstruct the original text, which has been shown to improve the model's ability to generalize to new tasks and outperform other pre-trained language models like GPT and BERT.

The model flavour chosen is "facebook/bart-base"; the "after" column is used as the source while the "before" column is used as the target.

## Intended uses & limitations

This model can be used as an out-of-the-box solution for inverse text normalization, converting ASR-generated un-normalized text such as "my c v v for my card is five six seven and it expires on november twenty three" -> "my CVV for my card is 567 and it expires on November 23". The model needs to be explored with various min and max length settings at generation time for your specific use case.

### How to use

```python
>>> from transformers import pipeline
>>> generator = pipeline(model="pavanBuduguppa/asr_inverse_text_normalization")
>>> generator("my c v v for my card is five six seven and it expires on november twenty three")
```

## Training data

All credits and rights for the training data belong to Google. The data was merely obtained and processed for this model; the original data can be found here: https://www.kaggle.com/competitions/text-normalization-challenge-english-language/data
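Generation length can be tuned through the standard generate kwargs accepted by the pipeline call; a sketch (the `max_length` value is illustrative, not a recommendation):

```python
from transformers import pipeline

generator = pipeline(model="pavanBuduguppa/asr_inverse_text_normalization")

# max_length is illustrative; explore min/max length settings for your inputs.
generator(
    "my c v v for my card is five six seven and it expires on november twenty three",
    max_length=64,
)
```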
albert-base-v2
[ "pytorch", "tf", "jax", "rust", "safetensors", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4,785,283
2023-02-22T13:08:19Z
---
tags:
- unity-ml-agents
- ml-agents
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-Huggy
library_name: ml-agents
---

# **ppo** Agent playing **Huggy**
This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)
The Documentation: https://github.com/huggingface/ml-agents#get-started
We wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub.

### Resume the training
```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play
You can watch your agent **playing directly in your browser**:
1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy
2. Write your model_id: Isaacgv/ppo-Huggy
3. Select your *.nn / *.onnx file
4. Click on Watch the agent play 👀
albert-large-v2
[ "pytorch", "tf", "safetensors", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
26,792
2023-02-22T13:15:40Z
---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: LunarLander-v2
      type: LunarLander-v2
    metrics:
    - type: mean_reward
      value: 290.01 +/- 20.76
      name: mean_reward
      verified: false
---

# **PPO** Agent playing **LunarLander-v2**
This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)
A minimal loading sketch (the repo id and filename below are placeholders; substitute this model's actual Hub repo and checkpoint file):

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# Placeholders: point repo_id/filename at the actual Hub repo and zip file.
checkpoint = load_from_hub(repo_id="<user>/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)
```
albert-xxlarge-v2
[ "pytorch", "tf", "safetensors", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "exbert", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
42,640
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy - f1 model-index: - name: bert-base-uncased-finetuned-iemocap8 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-finetuned-iemocap8 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.8968 - Accuracy: 0.6654 - F1: 0.6723 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 4.319412088241492e-05 - train_batch_size: 64 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 30 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | No log | 1.0 | 51 | 1.0531 | 0.5597 | 0.5655 | | 1.0284 | 2.0 | 102 | 0.9370 | 0.6227 | 0.6304 | | 1.0284 | 3.0 | 153 | 0.8796 | 0.6722 | 0.6765 | | 0.4432 | 4.0 | 204 | 0.9785 | 0.6654 | 0.6727 | | 0.4432 | 5.0 | 255 | 1.0664 | 0.6586 | 0.6634 | | 0.2492 | 6.0 | 306 | 1.1291 | 0.6499 | 0.6606 | | 0.2492 | 7.0 | 357 | 1.1847 | 0.6702 | 0.6777 | | 0.1707 | 8.0 | 408 | 1.4084 | 0.6508 | 0.6534 | | 0.1707 | 9.0 | 459 | 1.3468 | 0.6702 | 0.6762 | | 0.1461 | 10.0 | 510 | 1.4245 | 0.6634 | 0.6710 | | 0.1461 | 11.0 | 561 | 1.4865 | 0.6499 | 0.6600 | | 0.1262 | 12.0 | 612 | 1.4616 | 0.6576 | 0.6656 | | 0.1262 | 13.0 | 663 | 1.5335 | 0.6663 | 0.6711 | | 0.1203 | 14.0 | 714 | 1.4855 | 0.6731 | 0.6806 | | 0.1203 | 15.0 | 765 | 1.5825 | 0.6712 | 0.6792 | | 0.1023 | 16.0 | 816 | 1.7145 | 0.6731 | 0.6794 | | 0.1023 | 17.0 | 867 | 1.6676 | 0.6751 | 0.6823 | | 0.0976 | 18.0 | 918 | 1.8013 | 0.6693 | 0.6719 | | 0.0976 | 19.0 | 969 | 1.7192 | 0.6673 | 0.6755 | | 0.0937 | 20.0 | 1020 | 1.7837 | 0.6654 | 0.6731 | | 0.0937 | 21.0 | 1071 | 1.7779 | 0.6760 | 0.6831 | | 0.0901 | 22.0 | 1122 | 1.8352 | 0.6615 | 0.6687 | | 0.0901 | 23.0 | 1173 | 1.8601 | 0.6596 | 0.6656 | | 0.0844 | 24.0 | 1224 | 1.9129 | 0.6625 | 0.6719 | | 0.0844 | 25.0 | 1275 | 1.8507 | 0.6731 | 0.6784 | | 0.0829 | 26.0 | 1326 | 1.8582 | 0.6673 | 0.6735 | | 0.0829 | 27.0 | 1377 | 1.8670 | 0.6770 | 0.6825 | | 0.0839 | 28.0 | 1428 | 1.8763 | 0.6741 | 0.6800 | | 0.0839 | 29.0 | 1479 | 1.8925 | 0.6702 | 0.6769 | | 0.0802 | 30.0 | 1530 | 1.8968 | 0.6654 | 0.6723 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.1+cu116 - Datasets 2.10.0 - Tokenizers 0.13.2
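A sketch of `TrainingArguments` matching the hyperparameters listed above (other arguments are assumed to be at their defaults; this is not the exact original training script):

```python
from transformers import TrainingArguments

# Mirrors the hyperparameters reported in this card; the Adam betas/epsilon
# match the Trainer defaults, so they are not set explicitly.
args = TrainingArguments(
    output_dir="bert-base-uncased-finetuned-iemocap8",
    learning_rate=4.319412088241492e-05,
    per_device_train_batch_size=64,
    per_device_eval_batch_size=32,
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=30,
)
```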
bert-base-chinese
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "zh", "arxiv:1810.04805", "transformers", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3,377,486
2023-02-22T13:26:16Z
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: polgrad-CartPole-v1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
bert-base-german-dbmdz-cased
[ "pytorch", "jax", "bert", "fill-mask", "de", "transformers", "license:mit", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,814
2023-02-22T13:28:35Z
---
tags:
- unity-ml-agents
- ml-agents
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-Pyramids
library_name: ml-agents
---

# **ppo** Agent playing **Pyramids**
This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)
The Documentation: https://github.com/huggingface/ml-agents#get-started
We wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub.

### Resume the training
```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play
You can watch your agent **playing directly in your browser**:
1. Go to https://huggingface.co/spaces/unity/ML-Agents-Pyramids
2. Write your model_id: mikato/ppo-Pyramids
3. Select your *.nn / *.onnx file
4. Click on Watch the agent play 👀
bert-base-german-dbmdz-uncased
[ "pytorch", "jax", "safetensors", "bert", "fill-mask", "de", "transformers", "license:mit", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
68,305
2023-02-22T13:29:22Z
---
license: mit
---

# Model Description

[Mixture of Diffusers](https://arxiv.org/abs/2302.02412)
bert-base-multilingual-uncased
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "multilingual", "af", "sq", "ar", "an", "hy", "ast", "az", "ba", "eu", "bar", "be", "bn", "inc", "bs", "br", "bg", "my", "ca", "ceb", "ce", "zh", "cv", "hr", "cs", "da", "nl", "en", "et", "fi", "fr", "gl", "ka", "de", "el", "gu", "ht", "he", "hi", "hu", "is", "io", "id", "ga", "it", "ja", "jv", "kn", "kk", "ky", "ko", "la", "lv", "lt", "roa", "nds", "lm", "mk", "mg", "ms", "ml", "mr", "min", "ne", "new", "nb", "nn", "oc", "fa", "pms", "pl", "pt", "pa", "ro", "ru", "sco", "sr", "scn", "sk", "sl", "aze", "es", "su", "sw", "sv", "tl", "tg", "ta", "tt", "te", "tr", "uk", "ud", "uz", "vi", "vo", "war", "cy", "fry", "pnb", "yo", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
328,585
null
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-Cartpole-v1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
distilbert-base-uncased-finetuned-sst-2-english
[ "pytorch", "tf", "rust", "safetensors", "distilbert", "text-classification", "en", "dataset:sst2", "dataset:glue", "arxiv:1910.01108", "doi:10.57967/hf/0181", "transformers", "license:apache-2.0", "model-index", "has_space" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3,060,704
2023-02-22T13:59:21Z
---
library_name: stable-baselines3
tags:
- PandaReachDense-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: A2C
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: PandaReachDense-v2
      type: PandaReachDense-v2
    metrics:
    - type: mean_reward
      value: -2.73 +/- 0.64
      name: mean_reward
      verified: false
---

# **A2C** Agent playing **PandaReachDense-v2**
This is a trained model of an **A2C** agent playing **PandaReachDense-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)
A minimal loading sketch (the repo id and filename below are placeholders; substitute this model's actual Hub repo and checkpoint file):

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import A2C

# Placeholders: point repo_id/filename at the actual Hub repo and zip file.
checkpoint = load_from_hub(repo_id="<user>/a2c-PandaReachDense-v2", filename="a2c-PandaReachDense-v2.zip")
model = A2C.load(checkpoint)
```
distilgpt2
[ "pytorch", "tf", "jax", "tflite", "rust", "coreml", "safetensors", "gpt2", "text-generation", "en", "dataset:openwebtext", "arxiv:1910.01108", "arxiv:2201.08542", "arxiv:2203.12574", "arxiv:1910.09700", "arxiv:1503.02531", "transformers", "exbert", "license:apache-2.0", "model-index", "co2_eq_emissions", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,611,668
2023-02-22T14:01:37Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - fl_image_category_ds metrics: - accuracy model-index: - name: project_name results: - task: name: Image Classification type: image-classification dataset: name: fl_image_category_ds type: fl_image_category_ds config: default split: train args: default metrics: - name: Accuracy type: accuracy value: 0.6621621621621622 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # project_name This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the fl_image_category_ds dataset. It achieves the following results on the evaluation set: - Loss: 0.9537 - Accuracy: 0.6622 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.3368 | 1.0 | 88 | 1.2575 | 0.5448 | | 1.1146 | 2.0 | 176 | 1.0928 | 0.6038 | | 0.9667 | 3.0 | 264 | 1.0195 | 0.6223 | | 0.9005 | 4.0 | 352 | 0.9832 | 0.6373 | | 0.8432 | 5.0 | 440 | 0.9537 | 0.6622 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.1 - Datasets 2.8.0 - Tokenizers 0.13.2
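Once published, inference is a one-liner with the image-classification pipeline; a sketch (the repo id and image path are placeholders):

```python
from transformers import pipeline

# "user/project_name" and the image path below are placeholders.
classifier = pipeline("image-classification", model="user/project_name")
classifier("path/to/image.jpg")
```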
Aakansha/hs
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-02-22T18:51:41Z
---
library_name: sample-factory
tags:
- deep-reinforcement-learning
- reinforcement-learning
- sample-factory
model-index:
- name: APPO
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: doom_health_gathering_supreme
      type: doom_health_gathering_supreme
    metrics:
    - type: mean_reward
      value: 7.21 +/- 2.30
      name: mean_reward
      verified: false
---

An **APPO** model trained on the **doom_health_gathering_supreme** environment.

This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory. Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/

## Downloading the model

After installing Sample-Factory, download the model with:
```
python -m sample_factory.huggingface.load_from_hub -r NoNameFound/rl_course_vizdoom_health_gathering_supreme
```

## Using the model

To run the model after download, use the `enjoy` script corresponding to this environment:
```
python -m <path.to.enjoy.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme
```

You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag. See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details

## Training with this model

To continue training with this model, use the `train` script corresponding to this environment:
```
python -m <path.to.train.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000
```

Note: you may have to adjust `--train_for_env_steps` to a suitably high number, as the experiment will resume at the number of steps it concluded at.
Aarav/MeanMadCrazy_HarryPotterBot
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-cartpole-v1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
AdapterHub/bert-base-uncased-pf-emo
[ "bert", "en", "dataset:emo", "arxiv:2104.08247", "adapter-transformers", "text-classification" ]
text-classification
{ "architectures": null, "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
---
library_name: sample-factory
tags:
- deep-reinforcement-learning
- reinforcement-learning
- sample-factory
model-index:
- name: APPO
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: doom_health_gathering_supreme
      type: doom_health_gathering_supreme
    metrics:
    - type: mean_reward
      value: 13.55 +/- 5.33
      name: mean_reward
      verified: false
---

An **APPO** model trained on the **doom_health_gathering_supreme** environment.

This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory. Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/

## Downloading the model

After installing Sample-Factory, download the model with:
```
python -m sample_factory.huggingface.load_from_hub -r GrimReaperSam/rl_course_vizdoom_health_gathering_supreme
```

## Using the model

To run the model after download, use the `enjoy` script corresponding to this environment:
```
python -m <path.to.enjoy.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme
```

You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag. See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details

## Training with this model

To continue training with this model, use the `train` script corresponding to this environment:
```
python -m <path.to.train.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000
```

Note: you may have to adjust `--train_for_env_steps` to a suitably high number, as the experiment will resume at the number of steps it concluded at.
AdapterHub/bert-base-uncased-pf-scitail
[ "bert", "en", "dataset:scitail", "arxiv:2104.08247", "adapter-transformers", "text-classification", "adapterhub:nli/scitail" ]
text-classification
{ "architectures": null, "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- tags: - autotrain - token-classification language: - unk widget: - text: "I love AutoTrain 🤗" datasets: - Andrei95/autotrain-data-jobbert15 co2_eq_emissions: emissions: 1.6240218297608988 --- # Model Trained Using AutoTrain - Problem type: Entity Extraction - Model ID: 3669997969 - CO2 Emissions (in grams): 1.6240 ## Validation Metrics - Loss: 0.293 - Accuracy: 0.902 - Precision: 0.547 - Recall: 0.672 - F1: 0.603 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/Andrei95/autotrain-jobbert15-3669997969 ``` Or Python API: ``` from transformers import AutoModelForTokenClassification, AutoTokenizer model = AutoModelForTokenClassification.from_pretrained("Andrei95/autotrain-jobbert15-3669997969", use_auth_token=True) tokenizer = AutoTokenizer.from_pretrained("Andrei95/autotrain-jobbert15-3669997969", use_auth_token=True) inputs = tokenizer("I love AutoTrain", return_tensors="pt") outputs = model(**inputs) ```
AdapterHub/roberta-base-pf-mit_movie_trivia
[ "roberta", "en", "arxiv:2104.08247", "adapter-transformers", "token-classification", "adapterhub:ner/mit_movie_trivia" ]
token-classification
{ "architectures": null, "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - Freeway-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Freeway-v5 type: Freeway-v5 metrics: - type: mean_reward value: 22.20 +/- 1.08 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Freeway-v5** This is a trained model of a PPO agent playing Freeway-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_ppo_envpool_impala_atari_wrapper_naturecnn --env-id Freeway-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Freeway-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed2/raw/main/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py curl -OL https://huggingface.co/cleanrl/Freeway-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed2/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Freeway-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed2/raw/main/poetry.lock poetry install --all-extras python cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py --distributed --learner-device-ids 1 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Freeway-v5 --seed 2 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 20, 'async_update': 3, 'batch_size': 15360, 'capture_video': False, 'clip_coef': 0.1, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Freeway-v5', 'exp_name': 'cleanba_ppo_envpool_impala_atari_wrapper_naturecnn', 'gae_lambda': 0.95, 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 7680, 'local_minibatch_size': 1920, 'local_num_envs': 60, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 3840, 'norm_adv': True, 'num_envs': 120, 'num_minibatches': 4, 'num_steps': 128, 'num_updates': 3255, 'profile': False, 'save_model': True, 'seed': 2, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'update_epochs': 4, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 2} ```
AdapterHub/roberta-base-pf-pmb_sem_tagging
[ "roberta", "en", "arxiv:2104.08247", "adapter-transformers", "token-classification", "adapterhub:semtag/pmb" ]
token-classification
{ "architectures": null, "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - DemonAttack-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: DemonAttack-v5 type: DemonAttack-v5 metrics: - type: mean_reward value: 113614.00 +/- 2737.82 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **DemonAttack-v5** This is a trained model of a PPO agent playing DemonAttack-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_ppo_envpool_impala_atari_wrapper_naturecnn --env-id DemonAttack-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/DemonAttack-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed1/raw/main/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py curl -OL https://huggingface.co/cleanrl/DemonAttack-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed1/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/DemonAttack-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed1/raw/main/poetry.lock poetry install --all-extras python cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py --distributed --learner-device-ids 1 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id DemonAttack-v5 --seed 1 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 20, 'async_update': 3, 'batch_size': 15360, 'capture_video': False, 'clip_coef': 0.1, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'DemonAttack-v5', 'exp_name': 'cleanba_ppo_envpool_impala_atari_wrapper_naturecnn', 'gae_lambda': 0.95, 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 7680, 'local_minibatch_size': 1920, 'local_num_envs': 60, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 3840, 'norm_adv': True, 'num_envs': 120, 'num_minibatches': 4, 'num_steps': 128, 'num_updates': 3255, 'profile': False, 'save_model': True, 'seed': 1, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'update_epochs': 4, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 2} ```
AdapterHub/roberta-base-pf-quail
[ "roberta", "en", "dataset:quail", "arxiv:2104.08247", "adapter-transformers" ]
null
{ "architectures": null, "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: Movie_Review_Sentiment_Analysis results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Movie_Review_Sentiment_Analysis This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.2865 - Accuracy: 0.8986 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:| | 0.2923 | 1.0 | 2500 | 0.2865 | 0.8986 | | 0.1782 | 2.0 | 5000 | 0.3732 | 0.903 | | 0.0819 | 3.0 | 7500 | 0.4211 | 0.9164 | | 0.0434 | 4.0 | 10000 | 0.4677 | 0.9176 | | 0.0106 | 5.0 | 12500 | 0.5555 | 0.9216 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
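A minimal inference sketch (the repo id is a placeholder for wherever this checkpoint is published):

```python
from transformers import pipeline

# "user/Movie_Review_Sentiment_Analysis" is a placeholder repo id.
sentiment = pipeline("text-classification", model="user/Movie_Review_Sentiment_Analysis")
sentiment("An absolute triumph; easily the best film I have seen this year.")
```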
AethiQs-Max/aethiqs-base_bertje-data_rotterdam-epochs_10
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- tags: - Solaris-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Solaris-v5 type: Solaris-v5 metrics: - type: mean_reward value: 1604.00 +/- 1006.97 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Solaris-v5** This is a trained model of a PPO agent playing Solaris-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_ppo_envpool_impala_atari_wrapper_naturecnn --env-id Solaris-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Solaris-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed1/raw/main/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py curl -OL https://huggingface.co/cleanrl/Solaris-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed1/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Solaris-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed1/raw/main/poetry.lock poetry install --all-extras python cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py --distributed --learner-device-ids 1 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Solaris-v5 --seed 1 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 20, 'async_update': 3, 'batch_size': 15360, 'capture_video': False, 'clip_coef': 0.1, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Solaris-v5', 'exp_name': 'cleanba_ppo_envpool_impala_atari_wrapper_naturecnn', 'gae_lambda': 0.95, 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 7680, 'local_minibatch_size': 1920, 'local_num_envs': 60, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 3840, 'norm_adv': True, 'num_envs': 120, 'num_minibatches': 4, 'num_steps': 128, 'num_updates': 3255, 'profile': False, 'save_model': True, 'seed': 1, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'update_epochs': 4, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 2} ```
AethiQs-Max/aethiqs-base_bertje-data_rotterdam-epochs_30-epoch_30
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- tags: - Solaris-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Solaris-v5 type: Solaris-v5 metrics: - type: mean_reward value: 2300.00 +/- 721.17 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Solaris-v5** This is a trained model of a PPO agent playing Solaris-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_ppo_envpool_impala_atari_wrapper_naturecnn --env-id Solaris-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Solaris-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed2/raw/main/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py curl -OL https://huggingface.co/cleanrl/Solaris-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed2/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Solaris-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed2/raw/main/poetry.lock poetry install --all-extras python cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py --distributed --learner-device-ids 1 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Solaris-v5 --seed 2 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 20, 'async_update': 3, 'batch_size': 15360, 'capture_video': False, 'clip_coef': 0.1, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Solaris-v5', 'exp_name': 'cleanba_ppo_envpool_impala_atari_wrapper_naturecnn', 'gae_lambda': 0.95, 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 7680, 'local_minibatch_size': 1920, 'local_num_envs': 60, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 3840, 'norm_adv': True, 'num_envs': 120, 'num_minibatches': 4, 'num_steps': 128, 'num_updates': 3255, 'profile': False, 'save_model': True, 'seed': 2, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'update_epochs': 4, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 2} ```
Ahmad/parsT5
[ "jax", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - conll2003 metrics: - precision - recall - f1 - accuracy model-index: - name: bert-finetuned-ner results: - task: name: Token Classification type: token-classification dataset: name: conll2003 type: conll2003 config: conll2003 split: validation args: conll2003 metrics: - name: Precision type: precision value: 0.9480193882667558 - name: Recall type: recall value: 0.9545607539548974 - name: F1 type: f1 value: 0.9512788259958073 - name: Accuracy type: accuracy value: 0.9917448697480628 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-finetuned-ner This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the conll2003 dataset. It achieves the following results on the evaluation set: - Loss: 0.0400 - Precision: 0.9480 - Recall: 0.9546 - F1: 0.9513 - Accuracy: 0.9917 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.0529 | 1.0 | 1756 | 0.0418 | 0.9390 | 0.9423 | 0.9406 | 0.9901 | | 0.0197 | 2.0 | 3512 | 0.0436 | 0.9338 | 0.9493 | 0.9415 | 0.9904 | | 0.0109 | 3.0 | 5268 | 0.0400 | 0.9480 | 0.9546 | 0.9513 | 0.9917 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.1+cu116 - Datasets 2.10.0 - Tokenizers 0.13.2
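A minimal inference sketch (the repo id is a placeholder; `aggregation_strategy="simple"` merges word-piece tokens back into whole entities):

```python
from transformers import pipeline

# "user/bert-finetuned-ner" is a placeholder repo id.
ner = pipeline("token-classification", model="user/bert-finetuned-ner", aggregation_strategy="simple")
ner("Hugging Face is based in New York City.")
```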
AidenGO/KDXF_Bert4MaskedLM
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- tags: - Surround-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Surround-v5 type: Surround-v5 metrics: - type: mean_reward value: -2.70 +/- 4.08 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Surround-v5** This is a trained model of a PPO agent playing Surround-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_ppo_envpool_impala_atari_wrapper_naturecnn --env-id Surround-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Surround-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed3/raw/main/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py curl -OL https://huggingface.co/cleanrl/Surround-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed3/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Surround-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed3/raw/main/poetry.lock poetry install --all-extras python cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py --distributed --learner-device-ids 1 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Surround-v5 --seed 3 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 20, 'async_update': 3, 'batch_size': 15360, 'capture_video': False, 'clip_coef': 0.1, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Surround-v5', 'exp_name': 'cleanba_ppo_envpool_impala_atari_wrapper_naturecnn', 'gae_lambda': 0.95, 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 7680, 'local_minibatch_size': 1920, 'local_num_envs': 60, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 3840, 'norm_adv': True, 'num_envs': 120, 'num_minibatches': 4, 'num_steps': 128, 'num_updates': 3255, 'profile': False, 'save_model': True, 'seed': 3, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'update_epochs': 4, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 2} ```
Akash7897/distilbert-base-uncased-finetuned-cola
[ "pytorch", "tensorboard", "distilbert", "text-classification", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
31
null
--- tags: - WizardOfWor-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: WizardOfWor-v5 type: WizardOfWor-v5 metrics: - type: mean_reward value: 10170.00 +/- 7145.08 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **WizardOfWor-v5** This is a trained model of a PPO agent playing WizardOfWor-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_ppo_envpool_impala_atari_wrapper_naturecnn --env-id WizardOfWor-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/WizardOfWor-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed3/raw/main/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py curl -OL https://huggingface.co/cleanrl/WizardOfWor-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed3/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/WizardOfWor-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed3/raw/main/poetry.lock poetry install --all-extras python cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py --distributed --learner-device-ids 1 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id WizardOfWor-v5 --seed 3 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 20, 'async_update': 3, 'batch_size': 15360, 'capture_video': False, 'clip_coef': 0.1, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'WizardOfWor-v5', 'exp_name': 'cleanba_ppo_envpool_impala_atari_wrapper_naturecnn', 'gae_lambda': 0.95, 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 7680, 'local_minibatch_size': 1920, 'local_num_envs': 60, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 3840, 'norm_adv': True, 'num_envs': 120, 'num_minibatches': 4, 'num_steps': 128, 'num_updates': 3255, 'profile': False, 'save_model': True, 'seed': 3, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'update_epochs': 4, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 2} ```
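The record above lists `Akash7897/distilbert-base-uncased-finetuned-cola` with pipeline_tag `text-classification` and a `DistilBertForSequenceClassification` architecture. A minimal sketch of loading it with the `transformers` pipeline API, assuming the checkpoint is publicly downloadable; the example sentence is a hypothetical input:

```python
from transformers import pipeline

# CoLA-style acceptability classifier from the record above; the label names
# (e.g. LABEL_0/LABEL_1 vs. acceptable/unacceptable) depend on the checkpoint config.
classifier = pipeline(
    "text-classification",
    model="Akash7897/distilbert-base-uncased-finetuned-cola",
)
print(classifier("The book was read by the whole class."))
```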
Akashamba/distilbert-base-uncased-finetuned-ner
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - YarsRevenge-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: YarsRevenge-v5 type: YarsRevenge-v5 metrics: - type: mean_reward value: 75440.50 +/- 9320.18 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **YarsRevenge-v5** This is a trained model of a PPO agent playing YarsRevenge-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_ppo_envpool_impala_atari_wrapper_naturecnn --env-id YarsRevenge-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/YarsRevenge-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed3/raw/main/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py curl -OL https://huggingface.co/cleanrl/YarsRevenge-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed3/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/YarsRevenge-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed3/raw/main/poetry.lock poetry install --all-extras python cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py --distributed --learner-device-ids 1 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id YarsRevenge-v5 --seed 3 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 20, 'async_update': 3, 'batch_size': 15360, 'capture_video': False, 'clip_coef': 0.1, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'YarsRevenge-v5', 'exp_name': 'cleanba_ppo_envpool_impala_atari_wrapper_naturecnn', 'gae_lambda': 0.95, 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 7680, 'local_minibatch_size': 1920, 'local_num_envs': 60, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 3840, 'norm_adv': True, 'num_envs': 120, 'num_minibatches': 4, 'num_steps': 128, 'num_updates': 3255, 'profile': False, 'save_model': True, 'seed': 3, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'update_epochs': 4, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 2} ```
Akashpb13/Central_kurdish_xlsr
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "ckb", "dataset:mozilla-foundation/common_voice_8_0", "transformers", "mozilla-foundation/common_voice_8_0", "generated_from_trainer", "robust-speech-event", "model_for_talk", "hf-asr-leaderboard", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- tags: - YarsRevenge-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: YarsRevenge-v5 type: YarsRevenge-v5 metrics: - type: mean_reward value: 91629.60 +/- 9983.31 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **YarsRevenge-v5** This is a trained model of a PPO agent playing YarsRevenge-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_ppo_envpool_impala_atari_wrapper_naturecnn --env-id YarsRevenge-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/YarsRevenge-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed1/raw/main/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py curl -OL https://huggingface.co/cleanrl/YarsRevenge-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed1/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/YarsRevenge-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed1/raw/main/poetry.lock poetry install --all-extras python cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py --distributed --learner-device-ids 1 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id YarsRevenge-v5 --seed 1 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 20, 'async_update': 3, 'batch_size': 15360, 'capture_video': False, 'clip_coef': 0.1, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'YarsRevenge-v5', 'exp_name': 'cleanba_ppo_envpool_impala_atari_wrapper_naturecnn', 'gae_lambda': 0.95, 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 7680, 'local_minibatch_size': 1920, 'local_num_envs': 60, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 3840, 'norm_adv': True, 'num_envs': 120, 'num_minibatches': 4, 'num_steps': 128, 'num_updates': 3255, 'profile': False, 'save_model': True, 'seed': 1, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'update_epochs': 4, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 2} ```
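The record above lists `Akashpb13/Central_kurdish_xlsr`, a `Wav2Vec2ForCTC` checkpoint with pipeline_tag `automatic-speech-recognition`. A minimal usage sketch, assuming a local audio recording (`sample.wav` is a hypothetical filename; decoding a file path requires ffmpeg):

```python
from transformers import pipeline

# Wav2Vec2 CTC model from the record above; the pipeline resamples input
# audio to the 16 kHz sampling rate these checkpoints expect.
asr = pipeline(
    "automatic-speech-recognition",
    model="Akashpb13/Central_kurdish_xlsr",
)
print(asr("sample.wav")["text"])  # "sample.wav" is a hypothetical local recording
```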
Aklily/Lilys
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
null
AkshatSurolia/BEiT-FaceMask-Finetuned
[ "pytorch", "beit", "image-classification", "dataset:Face-Mask18K", "transformers", "license:apache-2.0", "autotrain_compatible" ]
image-classification
{ "architectures": [ "BeitForImageClassification" ], "model_type": "beit", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
239
null
--- tags: - Zaxxon-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Zaxxon-v5 type: Zaxxon-v5 metrics: - type: mean_reward value: 19920.00 +/- 3871.90 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Zaxxon-v5** This is a trained model of a PPO agent playing Zaxxon-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_ppo_envpool_impala_atari_wrapper_naturecnn --env-id Zaxxon-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Zaxxon-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed3/raw/main/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py curl -OL https://huggingface.co/cleanrl/Zaxxon-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed3/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Zaxxon-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed3/raw/main/poetry.lock poetry install --all-extras python cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py --distributed --learner-device-ids 1 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Zaxxon-v5 --seed 3 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 20, 'async_update': 3, 'batch_size': 15360, 'capture_video': False, 'clip_coef': 0.1, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Zaxxon-v5', 'exp_name': 'cleanba_ppo_envpool_impala_atari_wrapper_naturecnn', 'gae_lambda': 0.95, 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 7680, 'local_minibatch_size': 1920, 'local_num_envs': 60, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 3840, 'norm_adv': True, 'num_envs': 120, 'num_minibatches': 4, 'num_steps': 128, 'num_updates': 3255, 'profile': False, 'save_model': True, 'seed': 3, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'update_epochs': 4, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 2} ```
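The record above lists `AkshatSurolia/BEiT-FaceMask-Finetuned`, a `BeitForImageClassification` checkpoint tagged with the Face-Mask18K dataset. A minimal sketch of scoring a single image (`photo.jpg` is a hypothetical local file; the pipeline needs Pillow to decode it):

```python
from transformers import pipeline

# BEiT classifier from the record above, fine-tuned on a face-mask dataset
# per its tags; prints each candidate label with its softmax score.
classifier = pipeline(
    "image-classification",
    model="AkshatSurolia/BEiT-FaceMask-Finetuned",
)
for pred in classifier("photo.jpg"):
    print(pred["label"], round(pred["score"], 3))
```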
AkshaySg/GrammarCorrection
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - Tennis-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Tennis-v5 type: Tennis-v5 metrics: - type: mean_reward value: -1.80 +/- 2.44 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Tennis-v5** This is a trained model of a PPO agent playing Tennis-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_ppo_envpool_impala_atari_wrapper_naturecnn --env-id Tennis-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Tennis-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed2/raw/main/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py curl -OL https://huggingface.co/cleanrl/Tennis-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed2/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Tennis-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed2/raw/main/poetry.lock poetry install --all-extras python cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py --distributed --learner-device-ids 1 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Tennis-v5 --seed 2 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 20, 'async_update': 3, 'batch_size': 15360, 'capture_video': False, 'clip_coef': 0.1, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Tennis-v5', 'exp_name': 'cleanba_ppo_envpool_impala_atari_wrapper_naturecnn', 'gae_lambda': 0.95, 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 7680, 'local_minibatch_size': 1920, 'local_num_envs': 60, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 3840, 'norm_adv': True, 'num_envs': 120, 'num_minibatches': 4, 'num_steps': 128, 'num_updates': 3255, 'profile': False, 'save_model': True, 'seed': 2, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'update_epochs': 4, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 2} ```
AlanDev/dall-e-better
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - Asterix-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Asterix-v5 type: Asterix-v5 metrics: - type: mean_reward value: 13460.00 +/- 8569.39 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Asterix-v5** This is a trained model of a PPO agent playing Asterix-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_ppo_envpool_impala_atari_wrapper_naturecnn --env-id Asterix-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Asterix-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed1/raw/main/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py curl -OL https://huggingface.co/cleanrl/Asterix-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed1/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Asterix-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed1/raw/main/poetry.lock poetry install --all-extras python cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py --distributed --learner-device-ids 1 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Asterix-v5 --seed 1 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 20, 'async_update': 3, 'batch_size': 15360, 'capture_video': False, 'clip_coef': 0.1, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Asterix-v5', 'exp_name': 'cleanba_ppo_envpool_impala_atari_wrapper_naturecnn', 'gae_lambda': 0.95, 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 7680, 'local_minibatch_size': 1920, 'local_num_envs': 60, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 3840, 'norm_adv': True, 'num_envs': 120, 'num_minibatches': 4, 'num_steps': 128, 'num_updates': 3255, 'profile': False, 'save_model': True, 'seed': 1, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'update_epochs': 4, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 2} ```
Aleksandar/bert-srb-ner
[ "pytorch", "bert", "token-classification", "dataset:wikiann", "transformers", "generated_from_trainer", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- tags: - Asteroids-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Asteroids-v5 type: Asteroids-v5 metrics: - type: mean_reward value: 2855.00 +/- 1100.61 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Asteroids-v5** This is a trained model of a PPO agent playing Asteroids-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_ppo_envpool_impala_atari_wrapper_naturecnn --env-id Asteroids-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Asteroids-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed3/raw/main/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py curl -OL https://huggingface.co/cleanrl/Asteroids-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed3/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Asteroids-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed3/raw/main/poetry.lock poetry install --all-extras python cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py --distributed --learner-device-ids 1 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Asteroids-v5 --seed 3 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 20, 'async_update': 3, 'batch_size': 15360, 'capture_video': False, 'clip_coef': 0.1, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Asteroids-v5', 'exp_name': 'cleanba_ppo_envpool_impala_atari_wrapper_naturecnn', 'gae_lambda': 0.95, 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 7680, 'local_minibatch_size': 1920, 'local_num_envs': 60, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 3840, 'norm_adv': True, 'num_envs': 120, 'num_minibatches': 4, 'num_steps': 128, 'num_updates': 3255, 'profile': False, 'save_model': True, 'seed': 3, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'update_epochs': 4, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 2} ```
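The record above lists `Aleksandar/bert-srb-ner`, a `BertForTokenClassification` checkpoint trained on wikiann per its tags. A minimal sketch of running it as an entity tagger; the Serbian example sentence is a hypothetical input:

```python
from transformers import pipeline

# Serbian NER model from the record above; aggregation_strategy="simple"
# merges word-piece tokens back into whole entity spans.
ner = pipeline(
    "token-classification",
    model="Aleksandar/bert-srb-ner",
    aggregation_strategy="simple",
)
# "Novak Djokovic was born in Belgrade."
print(ner("Novak Đoković je rođen u Beogradu."))
```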
Aleksandar/electra-srb-oscar
[ "pytorch", "electra", "fill-mask", "transformers", "generated_from_trainer", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "ElectraForMaskedLM" ], "model_type": "electra", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- tags: - Alien-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Alien-v5 type: Alien-v5 metrics: - type: mean_reward value: 1673.00 +/- 591.90 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Alien-v5** This is a trained model of a PPO agent playing Alien-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_ppo_envpool_impala_atari_wrapper_naturecnn --env-id Alien-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Alien-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed1/raw/main/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py curl -OL https://huggingface.co/cleanrl/Alien-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed1/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Alien-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed1/raw/main/poetry.lock poetry install --all-extras python cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py --distributed --learner-device-ids 1 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Alien-v5 --seed 1 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 20, 'async_update': 3, 'batch_size': 15360, 'capture_video': False, 'clip_coef': 0.1, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Alien-v5', 'exp_name': 'cleanba_ppo_envpool_impala_atari_wrapper_naturecnn', 'gae_lambda': 0.95, 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 7680, 'local_minibatch_size': 1920, 'local_num_envs': 60, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 3840, 'norm_adv': True, 'num_envs': 120, 'num_minibatches': 4, 'num_steps': 128, 'num_updates': 3255, 'profile': False, 'save_model': True, 'seed': 1, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'update_epochs': 4, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 2} ```
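The record above lists `Aleksandar/electra-srb-oscar`, an `ElectraForMaskedLM` checkpoint with pipeline_tag `fill-mask`. A minimal sketch, assuming the tokenizer uses the standard `[MASK]` token (read off the pipeline rather than hard-coded); the Serbian prompt is a hypothetical input:

```python
from transformers import pipeline

# Electra masked-LM from the record above; prints the top predicted
# fillers for the masked position with their scores.
fill = pipeline("fill-mask", model="Aleksandar/electra-srb-oscar")
# "Belgrade is the ___ of Serbia."
for pred in fill(f"Beograd je {fill.tokenizer.mask_token} Srbije."):
    print(pred["token_str"], round(pred["score"], 3))
```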
Aleksandar1932/distilgpt2-rock
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
null
--- tags: - Alien-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Alien-v5 type: Alien-v5 metrics: - type: mean_reward value: 1522.00 +/- 419.80 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Alien-v5** This is a trained model of a PPO agent playing Alien-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_ppo_envpool_impala_atari_wrapper_naturecnn --env-id Alien-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Alien-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed3/raw/main/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py curl -OL https://huggingface.co/cleanrl/Alien-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed3/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Alien-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed3/raw/main/poetry.lock poetry install --all-extras python cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py --distributed --learner-device-ids 1 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Alien-v5 --seed 3 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 20, 'async_update': 3, 'batch_size': 15360, 'capture_video': False, 'clip_coef': 0.1, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Alien-v5', 'exp_name': 'cleanba_ppo_envpool_impala_atari_wrapper_naturecnn', 'gae_lambda': 0.95, 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 7680, 'local_minibatch_size': 1920, 'local_num_envs': 60, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 3840, 'norm_adv': True, 'num_envs': 120, 'num_minibatches': 4, 'num_steps': 128, 'num_updates': 3255, 'profile': False, 'save_model': True, 'seed': 3, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'update_epochs': 4, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 2} ```
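The record above lists `Aleksandar1932/distilgpt2-rock`, and its config is one of the few in this section with non-null task_specific_params (`do_sample: true`, `max_length: 50` under text-generation). A minimal sketch that passes those same settings explicitly; the prompt is a hypothetical input:

```python
from transformers import pipeline

# distilgpt2 lyrics model from the record above; do_sample=True and
# max_length=50 mirror the task_specific_params stored in its config.
generator = pipeline("text-generation", model="Aleksandar1932/distilgpt2-rock")
out = generator("The night is young and", do_sample=True, max_length=50)
print(out[0]["generated_text"])
```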
AlekseyKulnevich/Pegasus-HeaderGeneration
[ "pytorch", "pegasus", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "PegasusForConditionalGeneration" ], "model_type": "pegasus", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- tags: - UpNDown-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: UpNDown-v5 type: UpNDown-v5 metrics: - type: mean_reward value: 280919.00 +/- 14355.34 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **UpNDown-v5** This is a trained model of a PPO agent playing UpNDown-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_ppo_envpool_impala_atari_wrapper_naturecnn --env-id UpNDown-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/UpNDown-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed1/raw/main/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py curl -OL https://huggingface.co/cleanrl/UpNDown-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed1/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/UpNDown-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed1/raw/main/poetry.lock poetry install --all-extras python cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py --distributed --learner-device-ids 1 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id UpNDown-v5 --seed 1 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 20, 'async_update': 3, 'batch_size': 15360, 'capture_video': False, 'clip_coef': 0.1, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'UpNDown-v5', 'exp_name': 'cleanba_ppo_envpool_impala_atari_wrapper_naturecnn', 'gae_lambda': 0.95, 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 7680, 'local_minibatch_size': 1920, 'local_num_envs': 60, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 3840, 'norm_adv': True, 'num_envs': 120, 'num_minibatches': 4, 'num_steps': 128, 'num_updates': 3255, 'profile': False, 'save_model': True, 'seed': 1, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'update_epochs': 4, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 2} ```
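The record above lists `AlekseyKulnevich/Pegasus-HeaderGeneration`, a `PegasusForConditionalGeneration` checkpoint with pipeline_tag `text2text-generation`; the two Pegasus records that follow expose the same interface. A minimal sketch, with a hypothetical input article:

```python
from transformers import pipeline

# Pegasus header-generation model from the record above; text2text-generation
# maps an input document to a generated header/headline.
headline = pipeline(
    "text2text-generation",
    model="AlekseyKulnevich/Pegasus-HeaderGeneration",
)
article = (
    "The central bank raised interest rates for the third time this year, "
    "citing persistent inflation in services and housing."
)
print(headline(article, max_length=32)[0]["generated_text"])
```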
AlekseyKulnevich/Pegasus-QuestionGeneration
[ "pytorch", "pegasus", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "PegasusForConditionalGeneration" ], "model_type": "pegasus", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
17
null
--- tags: - BattleZone-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: BattleZone-v5 type: BattleZone-v5 metrics: - type: mean_reward value: 29600.00 +/- 7269.11 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **BattleZone-v5** This is a trained model of a PPO agent playing BattleZone-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_ppo_envpool_impala_atari_wrapper_naturecnn --env-id BattleZone-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/BattleZone-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed1/raw/main/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py curl -OL https://huggingface.co/cleanrl/BattleZone-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed1/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/BattleZone-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed1/raw/main/poetry.lock poetry install --all-extras python cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py --distributed --learner-device-ids 1 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id BattleZone-v5 --seed 1 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 20, 'async_update': 3, 'batch_size': 15360, 'capture_video': False, 'clip_coef': 0.1, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'BattleZone-v5', 'exp_name': 'cleanba_ppo_envpool_impala_atari_wrapper_naturecnn', 'gae_lambda': 0.95, 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 7680, 'local_minibatch_size': 1920, 'local_num_envs': 60, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 3840, 'norm_adv': True, 'num_envs': 120, 'num_minibatches': 4, 'num_steps': 128, 'num_updates': 3255, 'profile': False, 'save_model': True, 'seed': 1, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'update_epochs': 4, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 2} ```
AlekseyKulnevich/Pegasus-Summarization
[ "pytorch", "pegasus", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "PegasusForConditionalGeneration" ], "model_type": "pegasus", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- tags: - UpNDown-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: UpNDown-v5 type: UpNDown-v5 metrics: - type: mean_reward value: 148898.00 +/- 30046.15 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **UpNDown-v5** This is a trained model of a PPO agent playing UpNDown-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_ppo_envpool_impala_atari_wrapper_naturecnn --env-id UpNDown-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/UpNDown-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed3/raw/main/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py curl -OL https://huggingface.co/cleanrl/UpNDown-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed3/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/UpNDown-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed3/raw/main/poetry.lock poetry install --all-extras python cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py --distributed --learner-device-ids 1 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id UpNDown-v5 --seed 3 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 20, 'async_update': 3, 'batch_size': 15360, 'capture_video': False, 'clip_coef': 0.1, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'UpNDown-v5', 'exp_name': 'cleanba_ppo_envpool_impala_atari_wrapper_naturecnn', 'gae_lambda': 0.95, 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 7680, 'local_minibatch_size': 1920, 'local_num_envs': 60, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 3840, 'norm_adv': True, 'num_envs': 120, 'num_minibatches': 4, 'num_steps': 128, 'num_updates': 3255, 'profile': False, 'save_model': True, 'seed': 3, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'update_epochs': 4, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 2} ```
Alessandro/model_name
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-02-22T23:30:50Z
--- tags: - BattleZone-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: BattleZone-v5 type: BattleZone-v5 metrics: - type: mean_reward value: 32600.00 +/- 4200.00 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **BattleZone-v5** This is a trained model of a PPO agent playing BattleZone-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_ppo_envpool_impala_atari_wrapper_naturecnn --env-id BattleZone-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/BattleZone-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed2/raw/main/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py curl -OL https://huggingface.co/cleanrl/BattleZone-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed2/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/BattleZone-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed2/raw/main/poetry.lock poetry install --all-extras python cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py --distributed --learner-device-ids 1 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id BattleZone-v5 --seed 2 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 20, 'async_update': 3, 'batch_size': 15360, 'capture_video': False, 'clip_coef': 0.1, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'BattleZone-v5', 'exp_name': 'cleanba_ppo_envpool_impala_atari_wrapper_naturecnn', 'gae_lambda': 0.95, 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 7680, 'local_minibatch_size': 1920, 'local_num_envs': 60, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 3840, 'norm_adv': True, 'num_envs': 120, 'num_minibatches': 4, 'num_steps': 128, 'num_updates': 3255, 'profile': False, 'save_model': True, 'seed': 2, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'update_epochs': 4, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 2} ```
AlexN/xls-r-300m-pt
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "pt", "dataset:mozilla-foundation/common_voice_8_0", "transformers", "robust-speech-event", "mozilla-foundation/common_voice_8_0", "generated_from_trainer", "hf-asr-leaderboard", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
15
null
--- tags: - Bowling-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Bowling-v5 type: Bowling-v5 metrics: - type: mean_reward value: 38.10 +/- 2.70 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Bowling-v5** This is a trained model of a PPO agent playing Bowling-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_ppo_envpool_impala_atari_wrapper_naturecnn --env-id Bowling-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Bowling-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed3/raw/main/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py curl -OL https://huggingface.co/cleanrl/Bowling-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed3/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Bowling-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed3/raw/main/poetry.lock poetry install --all-extras python cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py --distributed --learner-device-ids 1 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Bowling-v5 --seed 3 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 20, 'async_update': 3, 'batch_size': 15360, 'capture_video': False, 'clip_coef': 0.1, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Bowling-v5', 'exp_name': 'cleanba_ppo_envpool_impala_atari_wrapper_naturecnn', 'gae_lambda': 0.95, 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 7680, 'local_minibatch_size': 1920, 'local_num_envs': 60, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 3840, 'norm_adv': True, 'num_envs': 120, 'num_minibatches': 4, 'num_steps': 128, 'num_updates': 3255, 'profile': False, 'save_model': True, 'seed': 3, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'update_epochs': 4, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 2} ```
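The record above lists `AlexN/xls-r-300m-pt`, a Portuguese ASR checkpoint carrying `hf-asr-leaderboard` and `robust-speech-event` tags; leaderboard-style scoring for such models is word error rate. A minimal sketch with the `evaluate` library; both transcripts are hypothetical placeholders:

```python
import evaluate

# Word error rate, the metric behind the hf-asr-leaderboard tag on the
# record above; compares hypothesis transcripts against references.
wer = evaluate.load("wer")
predictions = ["o gato está em casa"]
references = ["o gato está em cima da casa"]
print(wer.compute(predictions=predictions, references=references))
```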
AlexaMerens/Owl
[ "license:cc" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-02-22T23:33:30Z
--- tags: - Berzerk-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Berzerk-v5 type: Berzerk-v5 metrics: - type: mean_reward value: 1038.00 +/- 228.46 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Berzerk-v5** This is a trained model of a PPO agent playing Berzerk-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_ppo_envpool_impala_atari_wrapper_naturecnn --env-id Berzerk-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Berzerk-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed2/raw/main/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py curl -OL https://huggingface.co/cleanrl/Berzerk-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed2/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Berzerk-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed2/raw/main/poetry.lock poetry install --all-extras python cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py --distributed --learner-device-ids 1 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Berzerk-v5 --seed 2 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 20, 'async_update': 3, 'batch_size': 15360, 'capture_video': False, 'clip_coef': 0.1, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Berzerk-v5', 'exp_name': 'cleanba_ppo_envpool_impala_atari_wrapper_naturecnn', 'gae_lambda': 0.95, 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 7680, 'local_minibatch_size': 1920, 'local_num_envs': 60, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 3840, 'norm_adv': True, 'num_envs': 120, 'num_minibatches': 4, 'num_steps': 128, 'num_updates': 3255, 'profile': False, 'save_model': True, 'seed': 2, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'update_epochs': 4, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 2} ```
AlexaRyck/KEITH
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - Berzerk-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Berzerk-v5 type: Berzerk-v5 metrics: - type: mean_reward value: 1389.00 +/- 331.50 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Berzerk-v5** This is a trained model of a PPO agent playing Berzerk-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_ppo_envpool_impala_atari_wrapper_naturecnn --env-id Berzerk-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Berzerk-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed3/raw/main/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py curl -OL https://huggingface.co/cleanrl/Berzerk-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed3/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Berzerk-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed3/raw/main/poetry.lock poetry install --all-extras python cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py --distributed --learner-device-ids 1 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Berzerk-v5 --seed 3 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 20, 'async_update': 3, 'batch_size': 15360, 'capture_video': False, 'clip_coef': 0.1, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Berzerk-v5', 'exp_name': 'cleanba_ppo_envpool_impala_atari_wrapper_naturecnn', 'gae_lambda': 0.95, 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 7680, 'local_minibatch_size': 1920, 'local_num_envs': 60, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 3840, 'norm_adv': True, 'num_envs': 120, 'num_minibatches': 4, 'num_steps': 128, 'num_updates': 3255, 'profile': False, 'save_model': True, 'seed': 3, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'update_epochs': 4, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 2} ```
AliPotter24/a
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - Atlantis-v5 - deep-reinforcement-learning - reinforcement-learning - custom-implementation library_name: cleanrl model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Atlantis-v5 type: Atlantis-v5 metrics: - type: mean_reward value: 928890.00 +/- 30089.45 name: mean_reward verified: false --- # (CleanRL) **PPO** Agent Playing **Atlantis-v5** This is a trained model of a PPO agent playing Atlantis-v5. The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py). ## Get Started To use this model, please install the `cleanrl` package with the following command: ``` pip install "cleanrl[jax,envpool,atari]" python -m cleanrl_utils.enjoy --exp-name cleanba_ppo_envpool_impala_atari_wrapper_naturecnn --env-id Atlantis-v5 ``` Please refer to the [documentation](https://docs.cleanrl.dev/get-started/zoo/) for more detail. ## Command to reproduce the training ```bash curl -OL https://huggingface.co/cleanrl/Atlantis-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed3/raw/main/cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py curl -OL https://huggingface.co/cleanrl/Atlantis-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed3/raw/main/pyproject.toml curl -OL https://huggingface.co/cleanrl/Atlantis-v5-cleanba_ppo_envpool_impala_atari_wrapper_naturecnn-seed3/raw/main/poetry.lock poetry install --all-extras python cleanba_ppo_envpool_impala_atari_wrapper_naturecnn.py --distributed --learner-device-ids 1 --track --wandb-project-name cleanba --save-model --upload-model --hf-entity cleanrl --env-id Atlantis-v5 --seed 3 ``` # Hyperparameters ```python {'actor_device_ids': [0], 'actor_devices': ['gpu:0'], 'anneal_lr': True, 'async_batch_size': 20, 'async_update': 3, 'batch_size': 15360, 'capture_video': False, 'clip_coef': 0.1, 'cuda': True, 'distributed': True, 'ent_coef': 0.01, 'env_id': 'Atlantis-v5', 'exp_name': 'cleanba_ppo_envpool_impala_atari_wrapper_naturecnn', 'gae_lambda': 0.95, 'gamma': 0.99, 'global_learner_decices': ['gpu:1', 'gpu:3'], 'hf_entity': 'cleanrl', 'learner_device_ids': [1], 'learner_devices': ['gpu:1'], 'learning_rate': 0.00025, 'local_batch_size': 7680, 'local_minibatch_size': 1920, 'local_num_envs': 60, 'local_rank': 0, 'max_grad_norm': 0.5, 'minibatch_size': 3840, 'norm_adv': True, 'num_envs': 120, 'num_minibatches': 4, 'num_steps': 128, 'num_updates': 3255, 'profile': False, 'save_model': True, 'seed': 3, 'target_kl': None, 'test_actor_learner_throughput': False, 'torch_deterministic': True, 'total_timesteps': 50000000, 'track': True, 'update_epochs': 4, 'upload_model': True, 'vf_coef': 0.5, 'wandb_entity': None, 'wandb_project_name': 'cleanba', 'world_size': 2} ```
Andres2015/HiggingFaceTest
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 datasets: - race language: - en library_name: transformers pipeline_tag: text2text-generation inference: false --- # t5-large fine-tuned to RACE for Generating Distractors - Input: `question <sep> answer <sep> context` - Output: list of 3 distractors ## Model Details t5-large model is fine-tuned to the RACE dataset where the input is the concatenation of (question, answer, context) and the output is a list of 3 distractors. This is the second component in the question generation pipeline (i.e. `g2`) in our [MQAG paper](https://arxiv.org/abs/2301.12307), or please refer to the GitHub repo of this project: https://github.com/potsawee/mqag0. ## How to Use the Model Use the code below to get started with the model. ```python >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM >>> tokenizer = AutoTokenizer.from_pretrained("potsawee/t5-large-generation-race-Distractor") >>> model = AutoModelForSeq2SeqLM.from_pretrained("potsawee/t5-large-generation-race-Distractor") >>> context = r""" ... World number one Novak Djokovic says he is hoping for a "positive decision" to allow him ... to play at Indian Wells and the Miami Open next month. The United States has extended ... its requirement for international visitors to be vaccinated against Covid-19. Proof of vaccination ... will be required to enter the country until at least 10 April, but the Serbian has previously ... said he is unvaccinated. The 35-year-old has applied for special permission to enter the country. ... Indian Wells and the Miami Open - two of the most prestigious tournaments on the tennis calendar ... outside the Grand Slams - start on 6 and 20 March respectively. Djokovic says he will return to ... the ATP tour in Dubai next week after claiming a record-extending 10th Australian Open title ... and a record-equalling 22nd Grand Slam men's title last month.""".replace("\n", "") >>> question = "What is the best title for the passage?" >>> answer = "Djokovic's application for special permission to enter the United States" >>> input_text = " ".join([question, tokenizer.sep_token, answer, tokenizer.sep_token, context]) >>> inputs = tokenizer(input_text, return_tensors="pt") >>> outputs = model.generate(**inputs, max_new_tokens=128) >>> distractors = tokenizer.decode(outputs[0], skip_special_tokens=False) >>> distractors = distractors.replace(tokenizer.pad_token, "").replace(tokenizer.eos_token, "") >>> distractors = [y.strip() for y in distractors.split(tokenizer.sep_token)] >>> print(distractors) ['The United States has extended its requirement for international visitors to be vaccinated against Covid-19', "Djokovic's return to the ATP tour in Dubai", "Djokovic's hope for a positive decision to allow him to play at Indian Wells and the Miami Open"] ``` ## Citation ```bibtex @article{manakul2023mqag, title={MQAG: Multiple-choice Question Answering and Generation for Assessing Information Consistency in Summarization}, author={Manakul, Potsawee and Liusie, Adian and Gales, Mark JF}, journal={arXiv preprint arXiv:2301.12307}, year={2023} } ```
Andrija/SRoBERTa-XL-NER
[ "pytorch", "roberta", "token-classification", "hr", "sr", "multilingual", "dataset:hr500k", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "RobertaForTokenClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy - f1 - recall - precision model-index: - name: electra-base-emotion-Tweet_About_Disaster_Or_Not results: [] language: - en --- # electra-base-emotion-Tweet_About_Disaster_Or_Not This model is a fine-tuned version of [bhadresh-savani/electra-base-emotion](https://huggingface.co/bhadresh-savani/electra-base-emotion) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.3276 - Accuracy: 0.8857 - F1: 0.7246 - Recall: 0.7991 - Precision: 0.6628 ## Model description This is a binary classification model to determine if tweet input samples are about a disaster or not. For more information on how it was created, check out the following link: https://github.com/DunnBC22/NLP_Projects/blob/main/Binary%20Classification/Transformer%20Comparison/Is%20This%20Tweet%20Referring%20to%20a%20Disaster%20or%20Not%3F%20-%20ELECTRA.ipynb ### Associated Projects This project is part of a comparison of multiple transformers. The others can be found at the following links: - https://huggingface.co/DunnBC22/roberta-base-Tweet_About_Disaster_Or_Not - https://huggingface.co/DunnBC22/deberta-v3-small-Tweet_About_Disaster_Or_Not - https://huggingface.co/DunnBC22/albert-base-v2-Tweet_About_Disaster_Or_Not - https://huggingface.co/DunnBC22/ernie-2.0-base-en-Tweet_About_Disaster_Or_Not - https://huggingface.co/DunnBC22/distilbert-base-uncased-Tweet_About_Disaster_Or_Not ## Intended uses & limitations This model is intended to demonstrate my ability to solve a complex problem using technology. The main limitation is the quality of the data source. ## Training and evaluation data Dataset Source: https://www.kaggle.com/datasets/vstepanenko/disaster-tweets ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Recall | Precision | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:------:|:---------:| | 0.4633 | 1.0 | 143 | 0.3938 | 0.8373 | 0.6490 | 0.7991 | 0.5463 | | 0.3566 | 2.0 | 286 | 0.3551 | 0.8575 | 0.6860 | 0.8271 | 0.5861 | | 0.3078 | 3.0 | 429 | 0.3416 | 0.8909 | 0.7220 | 0.7523 | 0.6940 | | 0.2813 | 4.0 | 572 | 0.3276 | 0.8857 | 0.7246 | 0.7991 | 0.6628 | | 0.2592 | 5.0 | 715 | 0.3279 | 0.8892 | 0.7273 | 0.7850 | 0.6774 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.1 - Datasets 2.9.0 - Tokenizers 0.12.1
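For completeness, a minimal inference sketch appears below. It is a hedged example: the Hub id is inferred from this card's title and the sibling DunnBC22 repos listed above, so verify it before relying on it.

```python
from transformers import pipeline

# Hub id inferred from the card title and the sibling repos above -- verify before use.
classifier = pipeline(
    "text-classification",
    model="DunnBC22/electra-base-emotion-Tweet_About_Disaster_Or_Not",
)

# The model returns a binary label: disaster-related or not.
print(classifier("Wildfire smoke has closed the coastal highway"))
```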
Andrija/SRoBERTa-XL
[ "pytorch", "roberta", "fill-mask", "hr", "sr", "multilingual", "dataset:oscar", "dataset:srwac", "dataset:leipzig", "dataset:cc100", "dataset:hrwac", "transformers", "masked-lm", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
54
null
---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers
---

# tags-allnli-GroNLP-bert-base-dutch-cased

This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search.

<!--- Describe your model here -->

## Usage (Sentence-Transformers)

Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:

```
pip install -U sentence-transformers
```

Then you can use the model like this:

```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]

model = SentenceTransformer('textgain/tags-allnli-GroNLP-bert-base-dutch-cased')
embeddings = model.encode(sentences)
print(embeddings)
```

## Usage (HuggingFace Transformers)
Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling operation on top of the contextualized word embeddings.

```python
from transformers import AutoTokenizer, AutoModel
import torch


#Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0] #First element of model_output contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)


# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']

# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('textgain/tags-allnli-GroNLP-bert-base-dutch-cased')
model = AutoModel.from_pretrained('textgain/tags-allnli-GroNLP-bert-base-dutch-cased')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)

# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])

print("Sentence embeddings:")
print(sentence_embeddings)
```

## Evaluation Results

<!--- Describe how your model was evaluated -->

For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=textgain/tags-allnli-GroNLP-bert-base-dutch-cased)

## Training
The model was trained with the parameters:

**DataLoader**:

`sentence_transformers.datasets.NoDuplicatesDataLoader.NoDuplicatesDataLoader` of length 4687 with parameters:
```
{'batch_size': 128}
```

**Loss**:

`sentence_transformers.losses.MultipleNegativesRankingLoss.MultipleNegativesRankingLoss` with parameters:
```
{'scale': 20.0, 'similarity_fct': 'cos_sim'}
```

Parameters of the fit()-Method:
```
{
    "epochs": 1,
    "evaluation_steps": 0,
    "evaluator": "NoneType",
    "max_grad_norm": 1,
    "optimizer_class": "<class 'torch.optim.adamw.AdamW'>",
    "optimizer_params": {
        "lr": 5e-05
    },
    "scheduler": "WarmupLinear",
    "steps_per_epoch": 3000,
    "warmup_steps": 300.0,
    "weight_decay": 0.01
}
```

## Full Model Architecture
```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 256, 'do_lower_case': False}) with Transformer model: BertModel
  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```

## Citing & Authors

<!--- Describe where people can find more information -->
Anirbanbhk/Hate-speech-Pretrained-movies
[ "tf", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
20
null
---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: LunarLander-v2
      type: LunarLander-v2
    metrics:
    - type: mean_reward
      value: 287.95 +/- 13.15
      name: mean_reward
      verified: false
---

# **PPO** Agent playing **LunarLander-v2**
This is a trained model of a **PPO** agent playing **LunarLander-v2**
using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)

A minimal loading sketch; the `repo_id` and `filename` below are placeholders, so substitute this model's actual Hub repo before running:

```python
from stable_baselines3 import PPO
from huggingface_sb3 import load_from_hub

# repo_id and filename are placeholders -- point them at this model's Hub repo
checkpoint = load_from_hub(repo_id="<user>/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)
```
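A short evaluation sketch under the same assumptions (a local `LunarLander-v2` environment and the `model` loaded above); `evaluate_policy` is the standard Stable-Baselines3 helper:

```python
import gym
from stable_baselines3.common.evaluation import evaluate_policy

# Evaluate the loaded policy over 10 episodes and report mean/std reward.
env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10, deterministic=True)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```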
AnonymousSub/AR_rule_based_bert_triplet_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- library_name: sample-factory tags: - deep-reinforcement-learning - reinforcement-learning - sample-factory model-index: - name: APPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: doom_health_gathering_supreme type: doom_health_gathering_supreme metrics: - type: mean_reward value: 12.50 +/- 5.67 name: mean_reward verified: false --- A(n) **APPO** model trained on the **doom_health_gathering_supreme** environment. This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory. Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/ ## Downloading the model After installing Sample-Factory, download the model with: ``` python -m sample_factory.huggingface.load_from_hub -r rahul-t-p/rl_course_vizdoom_health_gathering_supreme ``` ## Using the model To run the model after download, use the `enjoy` script corresponding to this environment: ``` python -m <path.to.enjoy.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme ``` You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag. See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details ## Training with this model To continue training with this model, use the `train` script corresponding to this environment: ``` python -m <path.to.train.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000 ``` Note, you may have to adjust `--train_for_env_steps` to a suitably high number as the experiment will resume at the number of steps it concluded at.
AnonymousSub/AR_rule_based_roberta_hier_quadruplet_epochs_1_shard_10
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
---
license: apache-2.0
---

# Introduction

Libraries in this repository are intended for use in https://github.com/k2-fsa/sherpa-onnx.

They are downloaded from https://mvnrepository.com/artifact/com.microsoft.onnxruntime/onnxruntime-android/1.14.0

```
wget https://repo1.maven.org/maven2/com/microsoft/onnxruntime/onnxruntime-android/1.14.0/onnxruntime-android-1.14.0.aar
mv onnxruntime-android-1.14.0.aar onnxruntime-android-1.14.0.zip
unzip onnxruntime-android-1.14.0.zip
cd onnxruntime-android-1.14.0
tree .
```

```
.
├── AndroidManifest.xml
├── R.txt
├── arm64-v8a
├── armeabi-v7a
├── classes.jar
├── headers
│   ├── cpu_provider_factory.h
│   ├── nnapi_provider_factory.h
│   ├── onnxruntime_c_api.h
│   ├── onnxruntime_cxx_api.h
│   └── onnxruntime_cxx_inline.h
├── jni
│   ├── arm64-v8a
│   │   ├── libonnxruntime.so
│   │   └── libonnxruntime4j_jni.so
│   ├── armeabi-v7a
│   │   ├── libonnxruntime.so
│   │   └── libonnxruntime4j_jni.so
│   ├── x86
│   │   ├── libonnxruntime.so
│   │   └── libonnxruntime4j_jni.so
│   └── x86_64
│       ├── libonnxruntime.so
│       └── libonnxruntime4j_jni.so
├── x86
└── x86_64

10 directories, 16 files
```
AnonymousSub/AR_rule_based_roberta_hier_triplet_epochs_1_shard_1
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: SD-sentiment-model-BERT-v1 results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # SD-sentiment-model-BERT-v1 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.0686 - Train Accuracy: 0.9786 - Validation Loss: 0.4576 - Validation Accuracy: 0.8860 - Epoch: 1 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': 1.0, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': False, 'is_legacy_optimizer': False, 'learning_rate': 3e-05, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Train Accuracy | Validation Loss | Validation Accuracy | Epoch | |:----------:|:--------------:|:---------------:|:-------------------:|:-----:| | 0.3289 | 0.8543 | 0.3989 | 0.8560 | 0 | | 0.0686 | 0.9786 | 0.4576 | 0.8860 | 1 | ### Framework versions - Transformers 4.26.0 - TensorFlow 2.11.0 - Datasets 2.9.0 - Tokenizers 0.13.2
AnonymousSub/AR_rule_based_roberta_only_classfn_twostage_epochs_1_shard_10
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- tags: - generated_from_trainer metrics: - accuracy - f1 model-index: - name: kobert-finetuned-review results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # kobert-finetuned-review This model is a fine-tuned version of [skt/kobert-base-v1](https://huggingface.co/skt/kobert-base-v1) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.2235 - Accuracy: 0.92 - F1: 0.9245 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 10 - eval_batch_size: 10 - seed: 42 - gradient_accumulation_steps: 16 - total_train_batch_size: 160 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | No log | 0.99 | 93 | 0.1984 | 0.926 | 0.9267 | | No log | 1.99 | 186 | 0.2235 | 0.92 | 0.9245 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.1+cu116 - Datasets 2.10.0 - Tokenizers 0.13.2
AnonymousSub/SR_EManuals-RoBERTa
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
---
license: mit
tags:
- NLP
datasets:
- Yaxin/SemEval2014Task4Raw
metrics:
- f1
- precision
- recall
pipeline_tag: text2text-generation
---

# ate_tk-instruct-base-def-pos-neg-neut-restaurants

This model is finetuned for the Aspect Term Extraction (ATE) subtask. The finetuning was carried out by adding prompts of the form:

- definition + 2 positive examples + 2 negative examples + 2 neutral examples

The prompt is prepended to each input review. It is important to note that **this model was finetuned on samples from the restaurant domain.**

The code for the official implementation of the paper [**InstructABSA: Instruction Learning for Aspect Based Sentiment Analysis**](https://arxiv.org/abs/2302.08624) can be found [here](https://github.com/kevinscaria/InstructABSA).

For the ATE subtask, this model is the current SOTA.

## Training data

InstructABSA models are trained on the benchmark dataset for Aspect Based Sentiment Analysis tasks viz. SemEval 2014. This [dataset](https://alt.qcri.org/semeval2014/task4/index.php?id=data-and-tools) consists of reviews from the laptop and restaurant domains and their corresponding aspect term and polarity labels.

### BibTeX entry and citation info
If you use this model in your work, please cite the following paper:

```bibtex
@inproceedings{Scaria2023InstructABSAIL,
  title={InstructABSA: Instruction Learning for Aspect Based Sentiment Analysis},
  author={Kevin Scaria and Himanshu Gupta and Saurabh Arjun Sawant and Swaroop Mishra and Chitta Baral},
  year={2023}
}
```
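A hedged generation sketch follows. The Hub id is inferred from the card title and the instruction prompt is a truncated placeholder (the real prompt is the definition plus the 2 positive / 2 negative / 2 neutral examples described above; see the InstructABSA repo for the exact strings):

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Hub id inferred from the card title -- verify before use.
model_id = "kevinscaria/ate_tk-instruct-base-def-pos-neg-neut-restaurants"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

# Placeholder instruction prompt; the real prompt includes the definition and
# the six in-context examples described above.
prompt = "Definition: The output will be the aspect terms in the input review. ..."
review = "The pasta was great but the service was painfully slow."

inputs = tokenizer(prompt + " " + review, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```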
AnonymousSub/T5_pubmedqa_question_generation
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
6
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - 10th_science_tamil_to_english model-index: - name: 10th_science_ta_to_eng results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 10th_science_ta_to_eng This model is a fine-tuned version of [openai/whisper-base](https://huggingface.co/openai/whisper-base) on the 10th_science_tamil_to_english dataset. It achieves the following results on the evaluation set: - eval_loss: 4.2536 - eval_wer: 133.3041 - eval_runtime: 194.5706 - eval_samples_per_second: 1.994 - eval_steps_per_second: 0.128 - epoch: 13.0 - step: 5000 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 32 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 5000 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.27.0.dev0 - Pytorch 1.13.1+cu116 - Datasets 2.10.2.dev0 - Tokenizers 0.13.2
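No usage snippet is included above, so here is a hedged inference sketch; the Hub id is a placeholder because the card does not state the repo owner, and the audio path is an assumption:

```python
from transformers import pipeline

# "<user>" is a placeholder -- the card does not state the repo owner.
asr = pipeline("automatic-speech-recognition", model="<user>/10th_science_ta_to_eng")

# Transcribe (and translate) a local audio file; path is an assumption.
result = asr("sample_lecture.wav")
print(result["text"])
```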
AnonymousSub/bert_mean_diff_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- license: mit tags: - generated_from_trainer metrics: - accuracy model-index: - name: bert-base-uncased-ag-news-finetuned-dwnews-categories results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-ag-news-finetuned-dwnews-categories This model is a fine-tuned version of [nateraw/bert-base-uncased-ag-news](https://huggingface.co/nateraw/bert-base-uncased-ag-news) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.8659 - Accuracy: 0.74 - Precision Weighted: 0.7435 - Precision Macro: 0.7557 - Recall Weighted: 0.74 - Recall Macro: 0.7304 - F1 Weighted: 0.7294 - F1 Macro: 0.7250 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision Weighted | Precision Macro | Recall Weighted | Recall Macro | F1 Weighted | F1 Macro | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------------------:|:---------------:|:---------------:|:------------:|:-----------:|:--------:| | 1.8428 | 0.25 | 50 | 1.5099 | 0.53 | 0.5008 | 0.4655 | 0.53 | 0.3544 | 0.4363 | 0.3246 | | 1.3134 | 0.5 | 100 | 1.2200 | 0.605 | 0.4765 | 0.4247 | 0.605 | 0.4681 | 0.5207 | 0.4261 | | 1.1562 | 0.75 | 150 | 1.0473 | 0.68 | 0.7007 | 0.6603 | 0.68 | 0.5534 | 0.6483 | 0.5507 | | 1.0008 | 1.0 | 200 | 0.9491 | 0.67 | 0.6281 | 0.5935 | 0.67 | 0.5881 | 0.6390 | 0.5778 | | 0.8173 | 1.25 | 250 | 0.9218 | 0.7 | 0.7028 | 0.6599 | 0.7 | 0.6694 | 0.6874 | 0.6417 | | 0.8385 | 1.5 | 300 | 0.8900 | 0.715 | 0.7250 | 0.7131 | 0.715 | 0.6600 | 0.7059 | 0.6637 | | 0.6988 | 1.75 | 350 | 0.9198 | 0.7 | 0.6941 | 0.6875 | 0.7 | 0.6825 | 0.6866 | 0.6704 | | 0.6851 | 2.0 | 400 | 0.8607 | 0.72 | 0.7248 | 0.7310 | 0.72 | 0.6978 | 0.7067 | 0.6965 | | 0.548 | 2.25 | 450 | 0.8659 | 0.74 | 0.7435 | 0.7557 | 0.74 | 0.7304 | 0.7294 | 0.7250 | | 0.4898 | 2.5 | 500 | 0.9184 | 0.73 | 0.7379 | 0.7079 | 0.73 | 0.7599 | 0.7229 | 0.7203 | | 0.5683 | 2.75 | 550 | 0.9207 | 0.72 | 0.7188 | 0.7089 | 0.72 | 0.7429 | 0.7150 | 0.7195 | | 0.4971 | 3.0 | 600 | 0.9256 | 0.72 | 0.7257 | 0.7104 | 0.72 | 0.7384 | 0.7141 | 0.7126 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.1+cu116 - Datasets 2.10.0 - Tokenizers 0.13.2
AnonymousSub/cline-emanuals-s10-AR
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
--- pipeline_tag: image-classification library_name: fastai ---
AnonymousSub/cline-emanuals-techqa
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
---
library_name: rl-algo-impls
tags:
- procgen-starpilot-easy
- ppo
- deep-reinforcement-learning
- reinforcement-learning
model-index:
- name: ppo
  results:
  - metrics:
    - type: mean_reward
      value: 33.2 +/- 22.75
      name: mean_reward
    task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: procgen-starpilot-easy
      type: procgen-starpilot-easy
---

# **PPO** Agent playing **procgen-starpilot-easy**

This is a trained model of a **PPO** agent playing **procgen-starpilot-easy** using the [/sgoodfriend/rl-algo-impls](https://github.com/sgoodfriend/rl-algo-impls) repo.

All models trained at this commit can be found at https://api.wandb.ai/links/sgoodfriend/f3w1hwyb.

## Training Results

This model was trained from 3 trainings of **PPO** agents using different initial seeds. These agents were trained by checking out [21ee1ab](https://github.com/sgoodfriend/rl-algo-impls/tree/21ee1ab96a186676e5ed2f8c3185902f7c7bca7a). The best and last models were kept from each training. This submission loads the best model from each training, re-evaluates them, and selects the best model from these latest evaluations (mean - std).

| algo | env | seed | reward_mean | reward_std | eval_episodes | best | wandb_url |
|:-------|:----------|-------:|--------------:|-------------:|----------------:|:-------|:-----------------------------------------------------------------------------|
| ppo | starpilot | 1 | 29.375 | 21.989 | 64 | | [wandb](https://wandb.ai/sgoodfriend/rl-algo-impls-benchmarks/runs/sbgk2hzd) |
| ppo | starpilot | 2 | 28.3438 | 19.8622 | 64 | | [wandb](https://wandb.ai/sgoodfriend/rl-algo-impls-benchmarks/runs/11cs8fmu) |
| ppo | starpilot | 3 | 33.2031 | 22.7536 | 64 | * | [wandb](https://wandb.ai/sgoodfriend/rl-algo-impls-benchmarks/runs/bvevxrot) |

### Prerequisites: Weights & Biases (WandB)

Training and benchmarking assumes you have a Weights & Biases project to upload runs to. By default training goes to a rl-algo-impls project while benchmarks go to rl-algo-impls-benchmarks. During training and benchmarking runs, videos of the best models and the model weights are uploaded to WandB.

Before doing anything below, you'll need to create a wandb account and run `wandb login`.

## Usage

/sgoodfriend/rl-algo-impls: https://github.com/sgoodfriend/rl-algo-impls

Note: While the model state dictionary and hyperparameters are saved, the latest implementation could be sufficiently different to not be able to reproduce similar results. You might need to checkout the commit the agent was trained on: [21ee1ab](https://github.com/sgoodfriend/rl-algo-impls/tree/21ee1ab96a186676e5ed2f8c3185902f7c7bca7a).

```
# Downloads the model, sets hyperparameters, and runs agent for 3 episodes
python enjoy.py --wandb-run-path=sgoodfriend/rl-algo-impls-benchmarks/bvevxrot
```

Setup hasn't been completely worked out yet, so you might be best served by using Google Colab starting from the [colab_enjoy.ipynb](https://github.com/sgoodfriend/rl-algo-impls/blob/main/colab_enjoy.ipynb) notebook.

## Training

If you want the highest chance to reproduce these results, you'll want to checkout the commit the agent was trained on: [21ee1ab](https://github.com/sgoodfriend/rl-algo-impls/tree/21ee1ab96a186676e5ed2f8c3185902f7c7bca7a). While training is deterministic, different hardware will give different results.
``` python train.py --algo ppo --env procgen-starpilot-easy --seed 3 ``` Setup hasn't been completely worked out yet, so you might be best served by using Google Colab starting from the [colab_train.ipynb](https://github.com/sgoodfriend/rl-algo-impls/blob/main/colab_train.ipynb) notebook. ## Benchmarking (with Lambda Labs instance) This and other models from https://api.wandb.ai/links/sgoodfriend/f3w1hwyb were generated by running a script on a Lambda Labs instance. In a Lambda Labs instance terminal: ``` git clone [email protected]:sgoodfriend/rl-algo-impls.git cd rl-algo-impls bash ./lambda_labs/setup.sh wandb login bash ./lambda_labs/benchmark.sh ``` ### Alternative: Google Colab Pro+ As an alternative, [colab_benchmark.ipynb](https://github.com/sgoodfriend/rl-algo-impls/tree/main/benchmarks#:~:text=colab_benchmark.ipynb), can be used. However, this requires a Google Colab Pro+ subscription and running across 4 separate instances because otherwise running all jobs will exceed the 24-hour limit. ## Hyperparameters This isn't exactly the format of hyperparams in hyperparams/ppo.yml, but instead the Wandb Run Config. However, it's very close and has some additional data: ``` algo: ppo algo_hyperparams: batch_size: 2048 clip_range: 0.2 clip_range_vf: 0.2 ent_coef: 0.01 gae_lambda: 0.95 gamma: 0.999 learning_rate: 0.0005 n_epochs: 3 n_steps: 256 vf_coef: 0.5 env: procgen-starpilot-easy env_hyperparams: is_procgen: true make_kwargs: distribution_mode: easy n_envs: 64 normalize: true env_id: starpilot eval_params: deterministic: false ignore_first_episode: true n_timesteps: 25000000 policy_hyperparams: activation_fn: relu cnn_feature_dim: 256 cnn_layers_init_orthogonal: false cnn_style: impala init_layers_orthogonal: true seed: 3 use_deterministic_algorithms: true wandb_entity: null wandb_project_name: rl-algo-impls-benchmarks wandb_tags: - benchmark_21ee1ab - host_138-2-238-100 ```
AnonymousSub/cline_emanuals
[ "pytorch", "roberta", "transformers" ]
null
{ "architectures": [ "LecbertForPreTraining" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
---
library_name: rl-algo-impls
tags:
- procgen-bigfish-easy
- ppo
- deep-reinforcement-learning
- reinforcement-learning
model-index:
- name: ppo
  results:
  - metrics:
    - type: mean_reward
      value: 23.64 +/- 16.3
      name: mean_reward
    task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: procgen-bigfish-easy
      type: procgen-bigfish-easy
---

# **PPO** Agent playing **procgen-bigfish-easy**

This is a trained model of a **PPO** agent playing **procgen-bigfish-easy** using the [/sgoodfriend/rl-algo-impls](https://github.com/sgoodfriend/rl-algo-impls) repo.

All models trained at this commit can be found at https://api.wandb.ai/links/sgoodfriend/f3w1hwyb.

## Training Results

This model was trained from 3 trainings of **PPO** agents using different initial seeds. These agents were trained by checking out [21ee1ab](https://github.com/sgoodfriend/rl-algo-impls/tree/21ee1ab96a186676e5ed2f8c3185902f7c7bca7a). The best and last models were kept from each training. This submission loads the best model from each training, re-evaluates them, and selects the best model from these latest evaluations (mean - std).

| algo | env | seed | reward_mean | reward_std | eval_episodes | best | wandb_url |
|:-------|:--------|-------:|--------------:|-------------:|----------------:|:-------|:-----------------------------------------------------------------------------|
| ppo | bigfish | 1 | 23.6406 | 16.3041 | 64 | * | [wandb](https://wandb.ai/sgoodfriend/rl-algo-impls-benchmarks/runs/mg5h9dhc) |
| ppo | bigfish | 2 | 18.0469 | 14.8959 | 64 | | [wandb](https://wandb.ai/sgoodfriend/rl-algo-impls-benchmarks/runs/r6oxvsed) |
| ppo | bigfish | 3 | 16.7656 | 17.2335 | 64 | | [wandb](https://wandb.ai/sgoodfriend/rl-algo-impls-benchmarks/runs/udygbbmi) |

### Prerequisites: Weights & Biases (WandB)

Training and benchmarking assumes you have a Weights & Biases project to upload runs to. By default training goes to a rl-algo-impls project while benchmarks go to rl-algo-impls-benchmarks. During training and benchmarking runs, videos of the best models and the model weights are uploaded to WandB.

Before doing anything below, you'll need to create a wandb account and run `wandb login`.

## Usage

/sgoodfriend/rl-algo-impls: https://github.com/sgoodfriend/rl-algo-impls

Note: While the model state dictionary and hyperparameters are saved, the latest implementation could be sufficiently different to not be able to reproduce similar results. You might need to checkout the commit the agent was trained on: [21ee1ab](https://github.com/sgoodfriend/rl-algo-impls/tree/21ee1ab96a186676e5ed2f8c3185902f7c7bca7a).

```
# Downloads the model, sets hyperparameters, and runs agent for 3 episodes
python enjoy.py --wandb-run-path=sgoodfriend/rl-algo-impls-benchmarks/mg5h9dhc
```

Setup hasn't been completely worked out yet, so you might be best served by using Google Colab starting from the [colab_enjoy.ipynb](https://github.com/sgoodfriend/rl-algo-impls/blob/main/colab_enjoy.ipynb) notebook.

## Training

If you want the highest chance to reproduce these results, you'll want to checkout the commit the agent was trained on: [21ee1ab](https://github.com/sgoodfriend/rl-algo-impls/tree/21ee1ab96a186676e5ed2f8c3185902f7c7bca7a). While training is deterministic, different hardware will give different results.
``` python train.py --algo ppo --env procgen-bigfish-easy --seed 1 ``` Setup hasn't been completely worked out yet, so you might be best served by using Google Colab starting from the [colab_train.ipynb](https://github.com/sgoodfriend/rl-algo-impls/blob/main/colab_train.ipynb) notebook. ## Benchmarking (with Lambda Labs instance) This and other models from https://api.wandb.ai/links/sgoodfriend/f3w1hwyb were generated by running a script on a Lambda Labs instance. In a Lambda Labs instance terminal: ``` git clone [email protected]:sgoodfriend/rl-algo-impls.git cd rl-algo-impls bash ./lambda_labs/setup.sh wandb login bash ./lambda_labs/benchmark.sh ``` ### Alternative: Google Colab Pro+ As an alternative, [colab_benchmark.ipynb](https://github.com/sgoodfriend/rl-algo-impls/tree/main/benchmarks#:~:text=colab_benchmark.ipynb), can be used. However, this requires a Google Colab Pro+ subscription and running across 4 separate instances because otherwise running all jobs will exceed the 24-hour limit. ## Hyperparameters This isn't exactly the format of hyperparams in hyperparams/ppo.yml, but instead the Wandb Run Config. However, it's very close and has some additional data: ``` algo: ppo algo_hyperparams: batch_size: 2048 clip_range: 0.2 clip_range_vf: 0.2 ent_coef: 0.01 gae_lambda: 0.95 gamma: 0.999 learning_rate: 0.0005 n_epochs: 3 n_steps: 256 vf_coef: 0.5 env: procgen-bigfish-easy env_hyperparams: is_procgen: true make_kwargs: distribution_mode: easy n_envs: 64 normalize: true env_id: bigfish eval_params: deterministic: false ignore_first_episode: true n_timesteps: 25000000 policy_hyperparams: activation_fn: relu cnn_feature_dim: 256 cnn_layers_init_orthogonal: false cnn_style: impala init_layers_orthogonal: true seed: 1 use_deterministic_algorithms: true wandb_entity: null wandb_project_name: rl-algo-impls-benchmarks wandb_tags: - benchmark_21ee1ab - host_138-2-238-100 ```
AnonymousSub/cline_squad2.0
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
---
library_name: rl-algo-impls
tags:
- procgen-bossfight-easy
- ppo
- deep-reinforcement-learning
- reinforcement-learning
model-index:
- name: ppo
  results:
  - metrics:
    - type: mean_reward
      value: 9.91 +/- 5.37
      name: mean_reward
    task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: procgen-bossfight-easy
      type: procgen-bossfight-easy
---

# **PPO** Agent playing **procgen-bossfight-easy**

This is a trained model of a **PPO** agent playing **procgen-bossfight-easy** using the [/sgoodfriend/rl-algo-impls](https://github.com/sgoodfriend/rl-algo-impls) repo.

All models trained at this commit can be found at https://api.wandb.ai/links/sgoodfriend/f3w1hwyb.

## Training Results

This model was trained from 3 trainings of **PPO** agents using different initial seeds. These agents were trained by checking out [21ee1ab](https://github.com/sgoodfriend/rl-algo-impls/tree/21ee1ab96a186676e5ed2f8c3185902f7c7bca7a). The best and last models were kept from each training. This submission loads the best model from each training, re-evaluates them, and selects the best model from these latest evaluations (mean - std).

| algo | env | seed | reward_mean | reward_std | eval_episodes | best | wandb_url |
|:-------|:----------|-------:|--------------:|-------------:|----------------:|:-------|:-----------------------------------------------------------------------------|
| ppo | bossfight | 1 | 8.03125 | 6.37125 | 64 | | [wandb](https://wandb.ai/sgoodfriend/rl-algo-impls-benchmarks/runs/ok9cp59v) |
| ppo | bossfight | 2 | 9.90625 | 5.36982 | 64 | * | [wandb](https://wandb.ai/sgoodfriend/rl-algo-impls-benchmarks/runs/goavynh9) |
| ppo | bossfight | 3 | 8.98438 | 5.94635 | 64 | | [wandb](https://wandb.ai/sgoodfriend/rl-algo-impls-benchmarks/runs/b5yxrur0) |

### Prerequisites: Weights & Biases (WandB)

Training and benchmarking assumes you have a Weights & Biases project to upload runs to. By default training goes to a rl-algo-impls project while benchmarks go to rl-algo-impls-benchmarks. During training and benchmarking runs, videos of the best models and the model weights are uploaded to WandB.

Before doing anything below, you'll need to create a wandb account and run `wandb login`.

## Usage

/sgoodfriend/rl-algo-impls: https://github.com/sgoodfriend/rl-algo-impls

Note: While the model state dictionary and hyperparameters are saved, the latest implementation could be sufficiently different to not be able to reproduce similar results. You might need to checkout the commit the agent was trained on: [21ee1ab](https://github.com/sgoodfriend/rl-algo-impls/tree/21ee1ab96a186676e5ed2f8c3185902f7c7bca7a).

```
# Downloads the model, sets hyperparameters, and runs agent for 3 episodes
python enjoy.py --wandb-run-path=sgoodfriend/rl-algo-impls-benchmarks/goavynh9
```

Setup hasn't been completely worked out yet, so you might be best served by using Google Colab starting from the [colab_enjoy.ipynb](https://github.com/sgoodfriend/rl-algo-impls/blob/main/colab_enjoy.ipynb) notebook.

## Training

If you want the highest chance to reproduce these results, you'll want to checkout the commit the agent was trained on: [21ee1ab](https://github.com/sgoodfriend/rl-algo-impls/tree/21ee1ab96a186676e5ed2f8c3185902f7c7bca7a). While training is deterministic, different hardware will give different results.
``` python train.py --algo ppo --env procgen-bossfight-easy --seed 2 ``` Setup hasn't been completely worked out yet, so you might be best served by using Google Colab starting from the [colab_train.ipynb](https://github.com/sgoodfriend/rl-algo-impls/blob/main/colab_train.ipynb) notebook. ## Benchmarking (with Lambda Labs instance) This and other models from https://api.wandb.ai/links/sgoodfriend/f3w1hwyb were generated by running a script on a Lambda Labs instance. In a Lambda Labs instance terminal: ``` git clone [email protected]:sgoodfriend/rl-algo-impls.git cd rl-algo-impls bash ./lambda_labs/setup.sh wandb login bash ./lambda_labs/benchmark.sh ``` ### Alternative: Google Colab Pro+ As an alternative, [colab_benchmark.ipynb](https://github.com/sgoodfriend/rl-algo-impls/tree/main/benchmarks#:~:text=colab_benchmark.ipynb), can be used. However, this requires a Google Colab Pro+ subscription and running across 4 separate instances because otherwise running all jobs will exceed the 24-hour limit. ## Hyperparameters This isn't exactly the format of hyperparams in hyperparams/ppo.yml, but instead the Wandb Run Config. However, it's very close and has some additional data: ``` algo: ppo algo_hyperparams: batch_size: 2048 clip_range: 0.2 clip_range_vf: 0.2 ent_coef: 0.01 gae_lambda: 0.95 gamma: 0.999 learning_rate: 0.0005 n_epochs: 3 n_steps: 256 vf_coef: 0.5 env: procgen-bossfight-easy env_hyperparams: is_procgen: true make_kwargs: distribution_mode: easy n_envs: 64 normalize: true env_id: bossfight eval_params: deterministic: false ignore_first_episode: true n_timesteps: 25000000 policy_hyperparams: activation_fn: relu cnn_feature_dim: 256 cnn_layers_init_orthogonal: false cnn_style: impala init_layers_orthogonal: true seed: 2 use_deterministic_algorithms: true wandb_entity: null wandb_project_name: rl-algo-impls-benchmarks wandb_tags: - benchmark_21ee1ab - host_138-2-238-100 ```
AnonymousSub/consert-emanuals-s10-SR
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
null
--- license: mit tags: - generated_from_trainer datasets: - go_emotions metrics: - f1 model-index: - name: roberta-large-go-emotions-2 results: - task: name: Text Classification type: text-classification dataset: name: go_emotions type: multilabel_classification config: simplified split: test args: simplified metrics: - name: F1 type: f1 value: 0.5180 - task: name: Text Classification type: text-classification dataset: name: go_emotions type: multilabel_classification config: simplified split: validation args: simplified metrics: - name: F1 type: f1 value: 0.5203 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-large-go-emotions-2 This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the [go_emotions](https://huggingface.co/datasets/go_emotions) dataset. It achieves the following results on the test set (with a threshold of 0.15): - Accuracy: 0.44020 - Precision: 0.5041 - Recall: 0.5461 - F1: 0.5180 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 9 ### Training results | Training Loss | Epoch | Validation Loss | Accuracy | Precision | Recall | F1 | | ------------- | ----- | --------------- | -------- | --------- | ------ | ------ | | No log | 1.0 | 0.0889 | 0.4043 | 0.4807 | 0.4568 | 0.4446 | | 0.1062 | 2.0 | 0.0828 | 0.4113 | 0.4608 | 0.5363 | 0.4868 | | 0.1062 | 3.0 | 0.0813 | 0.4201 | 0.5198 | 0.5612 | 0.5227 | | No log | 4.0 | 0.0862 | 0.4292 | 0.5012 | 0.5558 | 0.5208 | | 0.0597 | 5.0 | 0.0924 | 0.4329 | 0.5164 | 0.5362 | 0.5151 | | 0.0597 | 6.0 | 0.0956 | 0.4445 | 0.5241 | 0.5328 | 0.5161 | | No log | 7.0 | 0.0962 | 0.4648 | 0.5138 | 0.5277 | 0.5151 | | 0.0458 | 8.0 | 0.0962 | 0.4462 | 0.5257 | 0.5270 | 0.5203 | | 0.0458 | 9.0 | 0.1029 | 0.4432 | 0.5076 | 0.5249 | 0.5111 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.12.0 - Datasets 2.1.0 - Tokenizers 0.12.1
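Since this is a multi-label task evaluated at a 0.15 threshold, inference should apply an independent sigmoid per label rather than a softmax over classes. A minimal sketch, assuming the checkpoint follows the standard `transformers` multi-label setup (the model id is a placeholder; prefix it with the owning user or org):

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

checkpoint = "roberta-large-go-emotions-2"  # placeholder: substitute the hosted repo id
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint)

inputs = tokenizer("Thanks, this made my day!", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Multi-label inference: an independent sigmoid per emotion, thresholded at
# 0.15 to match the evaluation above.
probs = torch.sigmoid(logits)[0]
predicted = [model.config.id2label[i] for i, p in enumerate(probs) if p >= 0.15]
print(predicted)
```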
AnonymousSub/consert-s10-AR
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
31
null
--- tags: - conversational --- # Penny DialoGPT Model
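A minimal chat sketch, assuming the checkpoint follows the usual DialoGPT conventions (the repo id below is a placeholder, not this model's actual path):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint = "<user>/DialoGPT-penny"  # placeholder: substitute the hosted repo id
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint)

# DialoGPT-style turn: append EOS to the user message and let the model
# generate the bot's reply.
input_ids = tokenizer.encode("Hi Penny!" + tokenizer.eos_token, return_tensors="pt")
reply_ids = model.generate(input_ids, max_length=100, pad_token_id=tokenizer.eos_token_id)
print(tokenizer.decode(reply_ids[0, input_ids.shape[-1]:], skip_special_tokens=True))
```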
AnonymousSub/declutr-biomed-roberta-papers
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- tags: - conversational --- # Penny DialoGPT Model
AnonymousSub/declutr-model-emanuals
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy - recall - precision model-index: - name: police-lethal-force-classifier results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # police-lethal-force-classifier This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0087 - Accuracy: 0.9980 - F1-score: 0.9964 - Recall: 0.9965 - Precision: 0.9963 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1-score | Recall | Precision | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:|:------:|:---------:| | 0.0138 | 1.0 | 12050 | 0.0132 | 0.9973 | 0.9951 | 0.9953 | 0.9949 | | 0.0091 | 2.0 | 24100 | 0.0087 | 0.9980 | 0.9964 | 0.9965 | 0.9963 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.1+cu116 - Datasets 2.10.0 - Tokenizers 0.13.2
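A quick way to try a fine-tuned classifier like this is the `text-classification` pipeline. A minimal sketch — the model id is a placeholder for wherever the checkpoint is hosted:

```python
from transformers import pipeline

# Placeholder model id; substitute the hosted checkpoint.
classifier = pipeline("text-classification", model="police-lethal-force-classifier")
print(classifier("Officers reportedly opened fire during the traffic stop."))
```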
AnonymousSub/declutr-model_squad2.0
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- tags: - autotrain - summarization language: - unk widget: - text: "I love AutoTrain 🤗" datasets: - zaib32/autotrain-data-bart_jobs_description co2_eq_emissions: emissions: 5.188589459184297 --- # Model Trained Using AutoTrain - Problem type: Summarization - Model ID: 3667398231 - CO2 Emissions (in grams): 5.1886 ## Validation Metrics - Loss: 1.129 - Rouge1: 66.484 - Rouge2: 42.519 - RougeL: 53.467 - RougeLsum: 62.459 - Gen Len: 129.500 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_HUGGINGFACE_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/zaib32/autotrain-bart_jobs_description-3667398231 ```
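The same request can be made from Python. A sketch mirroring the cURL call above (the bearer token remains a placeholder):

```python
import requests

# Python equivalent of the cURL call above.
API_URL = "https://api-inference.huggingface.co/zaib32/autotrain-bart_jobs_description-3667398231"
headers = {"Authorization": "Bearer YOUR_HUGGINGFACE_API_KEY"}  # placeholder token

response = requests.post(API_URL, headers=headers, json={"inputs": "I love AutoTrain"})
print(response.json())
```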
AnonymousSub/declutr-model_wikiqa
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
26
null
---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: LunarLander-v2
      type: LunarLander-v2
    metrics:
    - type: mean_reward
      value: 249.53 +/- 17.26
      name: mean_reward
      verified: false
---

# **PPO** Agent playing **LunarLander-v2**

This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)

A minimal loading sketch — the repo id and filename below are placeholders; substitute the actual upload:

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# Placeholder repo id/filename; point these at the hosted checkpoint.
checkpoint = load_from_hub(repo_id="<user>/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)
```
AnonymousSub/declutr-s10-SR
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
36
2023-02-23T08:17:57Z
---
tags:
- unity-ml-agents
- ml-agents
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-SoccerTwos
library_name: ml-agents
---

# **poca** Agent playing **SoccerTwos**

This is a trained model of a **poca** agent playing **SoccerTwos** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)

The Documentation: https://github.com/huggingface/ml-agents#get-started

We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub.

### Resume the training

```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play

You can watch your agent **playing directly in your browser**:

1. Go to https://huggingface.co/spaces/unity/ML-Agents-SoccerTwos
2. Write your model_id: Michunie/poca-SoccerTwos
3. Select your *.nn /*.onnx file
4. Click on Watch the agent play 👀
AnonymousSub/dummy_2
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
39
null
---
tags:
- unity-ml-agents
- ml-agents
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-SoccerTwos
library_name: ml-agents
---

# **poca** Agent playing **SoccerTwos**

This is a trained model of a **poca** agent playing **SoccerTwos** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)

The Documentation: https://github.com/huggingface/ml-agents#get-started

We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub.

### Resume the training

```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play

You can watch your agent **playing directly in your browser**:

1. Go to https://huggingface.co/spaces/unity/ML-Agents-SoccerTwos
2. Write your model_id: iubeda/poca-SoccerTwos
3. Select your *.nn /*.onnx file
4. Click on Watch the agent play 👀
AnonymousSub/hier_triplet_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy - f1 - precision - recall model-index: - name: sentiment_test23feb results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # sentiment_test23feb This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.5188 - Y True: [2 2 1 0 1 2 0 1 0 2 0 1 1 0 1 2 1 2 1 1 2 2 0 0 2 1 1 2 0 0 0 0 1 2 2 0 1 1 1 2 1 1 0 0 2 1 2 0 1 2 2 2 1 2 1 2 0 1 1 2 0 1 1 1 0 0 1 1 1 0 0 0 2 0 1 1 0 1 2 0 0 1 0 2 1 1 0 0 1 0 2 0 1 1 0 2 1 0 2 2 2 0 1 0 1 0 1 0 0 1 1 1 1 2 1 2 0 1 2 0 0 0 2 1 1 0 1 0 1 0 2 0 0 1 1 1 1 0 0 1 0 2 1 1 0 1 1 1 1 2 0 1 1 2 1 2 2 1 2 1 0 2 0 0 0 0 2 0 0 0 2 1 0 2 1 0 2 0 0 2 0 1 0 2 2 1 1 0 1 0 2 1 0 0 0 2 2 1 0 1 0 0 0 2 1 0 1 2 0 2 1 1 2 1 1 2 0 0 1 0 1 2 2 2 1 1 0 2 1 0] - Y Pred: [0 1 1 0 1 2 0 2 0 1 0 1 2 0 1 1 1 1 1 1 2 1 0 0 1 1 1 2 0 0 0 0 1 0 2 0 1 1 1 2 1 1 1 0 2 1 1 0 1 2 2 2 1 0 1 2 0 1 1 2 0 1 2 1 0 0 1 1 1 0 0 0 2 0 1 1 2 1 2 0 0 0 0 2 1 1 0 0 1 0 2 0 1 1 0 2 1 0 1 2 0 0 1 0 1 0 1 0 0 1 2 2 1 2 1 2 0 1 2 0 0 0 0 2 0 0 1 0 1 0 1 0 0 1 1 1 1 0 0 1 0 1 1 1 0 2 1 1 1 2 0 1 1 2 1 2 1 1 2 1 0 2 0 0 0 0 2 0 0 0 1 1 0 2 1 2 0 0 0 2 0 1 2 2 2 1 1 0 1 2 0 1 0 0 0 0 2 1 0 1 0 0 0 0 1 0 1 1 0 2 1 1 2 1 1 1 0 0 1 0 1 0 1 0 1 1 0 2 1 2] - Accuracy: 0.8217 - F1: 0.8146 - Precision: 0.8154 - Recall: 0.8217 - Confusion Matrix: [[76 1 5] [ 2 79 7] [11 15 34]] ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 50 - num_epochs: 5 ### Training results ### Framework versions - Transformers 4.26.1 - Pytorch 1.12.1+cu113 - Datasets 2.9.0 - Tokenizers 0.13.2
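The reported metrics and confusion matrix follow directly from the `Y True`/`Y Pred` arrays above. A sketch of reproducing them with scikit-learn, assuming weighted averaging (the arrays below are truncated stand-ins; paste in the full sequences to match the reported numbers):

```python
import numpy as np
from sklearn.metrics import accuracy_score, confusion_matrix, precision_recall_fscore_support

# Stand-in slices of the arrays above; use the full Y True / Y Pred sequences.
y_true = np.array([2, 2, 1, 0, 1, 2, 0, 1, 0, 2])
y_pred = np.array([0, 1, 1, 0, 1, 2, 0, 2, 0, 1])

precision, recall, f1, _ = precision_recall_fscore_support(y_true, y_pred, average="weighted")
print(accuracy_score(y_true, y_pred), precision, recall, f1)
print(confusion_matrix(y_true, y_pred))  # rows = true labels, columns = predictions
```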
AnonymousSub/hier_triplet_epochs_1_shard_10
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - NLP-MINI-PROJECT/rabbi_kook metrics: - rouge model-index: - name: kook-model-output-dir results: - task: name: Summarization type: summarization dataset: name: NLP-MINI-PROJECT/rabbi_kook type: NLP-MINI-PROJECT/rabbi_kook metrics: - name: Rouge1 type: rouge value: 0.0 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # kook-model-output-dir This model is a fine-tuned version of [google/mt5-base](https://huggingface.co/google/mt5-base) on the NLP-MINI-PROJECT/rabbi_kook dataset. It achieves the following results on the evaluation set: - Loss: 2.2130 - Rouge1: 0.0 - Rouge2: 0.0 - Rougel: 0.0 - Rougelsum: 0.0 - Gen Len: 42.7877 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results ### Framework versions - Transformers 4.27.0.dev0 - Pytorch 1.8.1+cu111 - Datasets 2.9.0 - Tokenizers 0.11.0
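A minimal generation sketch for an mT5 summarizer like this one — the model id and input text are placeholders, and note the zero ROUGE scores above before relying on the output:

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

checkpoint = "kook-model-output-dir"  # placeholder: substitute the hosted repo id
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

text = "Source text to summarize goes here."  # placeholder input
inputs = tokenizer(text, return_tensors="pt", truncation=True)
summary_ids = model.generate(**inputs, max_length=64)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```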
AnonymousSub/rule_based_bert_quadruplet_epochs_1_shard_1_wikiqa
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
33
null
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-CartPole-v1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
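For reference, the core REINFORCE update is small enough to sketch. This is a generic skeleton under the classic `gym` API, not this model's exact training code:

```python
import gym
import torch
import torch.nn as nn

# Softmax policy over CartPole's 2 actions, updated with
# discounted-return-weighted log-probabilities.
env = gym.make("CartPole-v1")
policy = nn.Sequential(nn.Linear(4, 64), nn.ReLU(), nn.Linear(64, 2), nn.Softmax(dim=-1))
optimizer = torch.optim.Adam(policy.parameters(), lr=1e-2)

for episode in range(500):
    state = env.reset()
    log_probs, rewards, done = [], [], False
    while not done:
        dist = torch.distributions.Categorical(policy(torch.as_tensor(state, dtype=torch.float32)))
        action = dist.sample()
        log_probs.append(dist.log_prob(action))
        state, reward, done, _ = env.step(action.item())
        rewards.append(reward)
    # Discounted returns, computed backwards, then normalized.
    returns, g = [], 0.0
    for r in reversed(rewards):
        g = r + 0.99 * g
        returns.insert(0, g)
    returns = torch.tensor(returns)
    returns = (returns - returns.mean()) / (returns.std() + 1e-8)
    loss = -(torch.stack(log_probs) * returns).sum()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
```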
AnonymousSub/rule_based_hier_quadruplet_epochs_1_shard_1_wikiqa
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
null
--- language: en thumbnail: http://www.huggingtweets.com/arvidkahl-marckohlbrugge-yadavajay/1677143666796/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1504671983799177217/Sx9SN3mk_400x400.jpg&#39;)"> </div> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1554412013706792961/wMVtjqeE_400x400.jpg&#39;)"> </div> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1201525049766883328/QPimCC9z_400x400.jpg&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI CYBORG 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Ajay Yadav & Marc Köhlbrugge & Arvid Kahl</div> <div style="text-align: center; font-size: 14px;">@arvidkahl-marckohlbrugge-yadavajay</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Ajay Yadav & Marc Köhlbrugge & Arvid Kahl. | Data | Ajay Yadav | Marc Köhlbrugge | Arvid Kahl | | --- | --- | --- | --- | | Tweets downloaded | 3249 | 3248 | 3239 | | Retweets | 282 | 303 | 435 | | Short tweets | 522 | 416 | 457 | | Tweets kept | 2445 | 2529 | 2347 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/k744x91d/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @arvidkahl-marckohlbrugge-yadavajay's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/ycq5gzxc) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/ycq5gzxc/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/arvidkahl-marckohlbrugge-yadavajay') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. 
## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
AnonymousSub/rule_based_roberta_hier_quadruplet_epochs_1_shard_10
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
---
library_name: sample-factory
tags:
- deep-reinforcement-learning
- reinforcement-learning
- sample-factory
model-index:
- name: APPO
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: doom_health_gathering_supreme
      type: doom_health_gathering_supreme
    metrics:
    - type: mean_reward
      value: 9.79 +/- 4.30
      name: mean_reward
      verified: false
---

An **APPO** model trained on the **doom_health_gathering_supreme** environment.

This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory. Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/

## Downloading the model

After installing Sample-Factory, download the model with:

```
python -m sample_factory.huggingface.load_from_hub -r besa2001/rl_course_vizdoom_health_gathering_supreme
```

## Using the model

To run the model after download, use the `enjoy` script corresponding to this environment:

```
python -m <path.to.enjoy.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme
```

You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag. See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details.

## Training with this model

To continue training with this model, use the `train` script corresponding to this environment:

```
python -m <path.to.train.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000
```

Note: you may have to adjust `--train_for_env_steps` to a suitably high number, as the experiment will resume at the number of steps it concluded at.
AnonymousSub/rule_based_roberta_hier_triplet_epochs_1_shard_1
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 136.50 +/- 60.04 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib Install the RL Zoo (with SB3 and SB3-Contrib): ```bash pip install rl_zoo3 ``` ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga ssw1591 -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga ssw1591 -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python -m rl_zoo3.train --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga ssw1591 ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 100000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 100000), ('n_timesteps', 10000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ```
AnonymousSub/rule_based_roberta_only_classfn_epochs_1_shard_1_squad2.0
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: bert-finetuned-ner-per-v8 results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # bert-finetuned-ner-per-v8 This model is a fine-tuned version of [BeardedJohn/bert-finetuned-ner-ubb-conll-endava-only-misc-v2](https://huggingface.co/BeardedJohn/bert-finetuned-ner-ubb-conll-endava-only-misc-v2) on an unknown dataset. It achieves the following results on the evaluation set: ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 846, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: mixed_float16 ### Training results ### Framework versions - Transformers 4.26.1 - TensorFlow 2.11.0 - Datasets 2.10.0 - Tokenizers 0.13.2
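The optimizer block above describes AdamW with a linear (power 1.0) decay from 2e-5 to 0 over 846 steps and 0.01 weight decay. A sketch of rebuilding it with `transformers.create_optimizer`, assuming zero warmup steps since none are listed:

```python
from transformers import create_optimizer

# 2e-5 initial LR, linear decay to 0 over 846 steps, 0.01 weight decay —
# matching the AdamWeightDecay/PolynomialDecay config printed above.
optimizer, lr_schedule = create_optimizer(
    init_lr=2e-5,
    num_train_steps=846,
    num_warmup_steps=0,
    weight_decay_rate=0.01,
)
```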
AnonymousSub/rule_based_roberta_twostage_quadruplet_epochs_1_shard_1
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- tags: - autotrain - translation language: - unk - unk datasets: - Tritkoman/autotrain-data-oldenglish2 co2_eq_emissions: emissions: 5.451467518019884 --- # Model Trained Using AutoTrain - Problem type: Translation - Model ID: 3680498282 - CO2 Emissions (in grams): 5.4515 ## Validation Metrics - Loss: 3.265 - SacreBLEU: 6.433 - Gen len: 16.747
AnonymousSub/rule_based_roberta_twostagetriplet_epochs_1_shard_1
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
2023-02-23T10:08:53Z
---
library_name: sample-factory
tags:
- deep-reinforcement-learning
- reinforcement-learning
- sample-factory
model-index:
- name: APPO
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: doom_health_gathering_supreme
      type: doom_health_gathering_supreme
    metrics:
    - type: mean_reward
      value: 12.12 +/- 5.77
      name: mean_reward
      verified: false
---

An **APPO** model trained on the **doom_health_gathering_supreme** environment.

This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory. Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/

## Downloading the model

After installing Sample-Factory, download the model with:

```
python -m sample_factory.huggingface.load_from_hub -r moodlep/rl_course_vizdoom_health_gathering_supreme
```

## Using the model

To run the model after download, use the `enjoy` script corresponding to this environment:

```
python -m <path.to.enjoy.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme
```

You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag. See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details.

## Training with this model

To continue training with this model, use the `train` script corresponding to this environment:

```
python -m <path.to.train.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000
```

Note: you may have to adjust `--train_for_env_steps` to a suitably high number, as the experiment will resume at the number of steps it concluded at.
AntonClaesson/finetuning_test
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: loso_m07_main_1 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # loso_m07_main_1 This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0752 - Wer: 1.62 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1000 - num_epochs: 7 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 9.6242 | 0.96 | 500 | 3.4099 | 1.0 | | 2.6899 | 1.92 | 1000 | 1.6890 | 2.3556 | | 1.0312 | 2.88 | 1500 | 0.3006 | 1.9356 | | 0.3173 | 3.84 | 2000 | 0.1852 | 1.7044 | | 0.1357 | 4.8 | 2500 | 0.1000 | 1.5333 | | 0.079 | 5.76 | 3000 | 0.0877 | 1.6156 | | 0.0559 | 6.72 | 3500 | 0.0752 | 1.62 | ### Framework versions - Transformers 4.17.0 - Pytorch 1.13.1+cu116 - Datasets 1.18.3 - Tokenizers 0.13.2
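A minimal transcription sketch for a fine-tuned XLS-R CTC model like this one — the model id and audio file are placeholders, and 16 kHz mono audio is assumed:

```python
import soundfile as sf
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

checkpoint = "loso_m07_main_1"  # placeholder: substitute the hosted repo id
processor = Wav2Vec2Processor.from_pretrained(checkpoint)
model = Wav2Vec2ForCTC.from_pretrained(checkpoint)

speech, sr = sf.read("sample.wav")  # hypothetical 16 kHz mono recording
inputs = processor(speech, sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(inputs.input_values).logits

# Greedy CTC decoding.
pred_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(pred_ids))
```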
Antony/mint_model
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
library_name: sample-factory
tags:
- deep-reinforcement-learning
- reinforcement-learning
- sample-factory
model-index:
- name: APPO
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: doom_health_gathering_supreme
      type: doom_health_gathering_supreme
    metrics:
    - type: mean_reward
      value: 10.63 +/- 4.89
      name: mean_reward
      verified: false
---

An **APPO** model trained on the **doom_health_gathering_supreme** environment.

This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory. Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/

## Downloading the model

After installing Sample-Factory, download the model with:

```
python -m sample_factory.huggingface.load_from_hub -r iubeda/rl_course_vizdoom_health_gathering_supreme
```

## Using the model

To run the model after download, use the `enjoy` script corresponding to this environment:

```
python -m <path.to.enjoy.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme
```

You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag. See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details.

## Training with this model

To continue training with this model, use the `train` script corresponding to this environment:

```
python -m <path.to.train.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000
```

Note: you may have to adjust `--train_for_env_steps` to a suitably high number, as the experiment will resume at the number of steps it concluded at.
Appolo/TestModel
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: LunarLander-v2
      type: LunarLander-v2
    metrics:
    - type: mean_reward
      value: 249.59 +/- 17.80
      name: mean_reward
      verified: false
---

# **PPO** Agent playing **LunarLander-v2**

This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)

A minimal loading sketch — the repo id and filename below are placeholders; substitute the actual upload:

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# Placeholder repo id/filename; point these at the hosted checkpoint.
checkpoint = load_from_hub(repo_id="<user>/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)
```