Columns (one row per model):
- modelId: string (length 4 to 81)
- tags: list
- pipeline_tag: string (17 classes)
- config: dict
- downloads: int64 (0 to 59.7M)
- first_commit: timestamp[ns, tz=UTC]
- card: string (length 51 to 438k)
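The rows below follow this schema. As a minimal sketch of how such a dump could be explored, assuming a local copy saved as a Parquet file named `models.parquet` (the file name, format, and use of pandas are assumptions, not part of this dataset), one might do:

```python
# Minimal sketch for exploring a model-metadata dump with the schema above.
# Assumptions: a local Parquet copy named "models.parquet" and pandas installed;
# neither is specified by the dataset itself.
import pandas as pd

df = pd.read_parquet("models.parquet")

# Sanity-check the columns against the schema described above.
print(df.dtypes)
print(df["modelId"].str.len().agg(["min", "max"]))  # expected roughly 4 to 81
print(df["pipeline_tag"].nunique())                 # expected about 17 classes

# Example query: the ten most-downloaded text-classification models.
top = (
    df[df["pipeline_tag"] == "text-classification"]
    .sort_values("downloads", ascending=False)
    .head(10)[["modelId", "downloads", "first_commit"]]
)
print(top)
```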
CLTL/gm-ner-xlmrbase
[ "pytorch", "tf", "xlm-roberta", "token-classification", "nl", "transformers", "dighum", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "XLMRobertaForTokenClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### Hussein_Deliberate_1000steps Dreambooth model trained by HusseinHE with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) Sample pictures of this concept:
CLTL/icf-levels-adm
[ "pytorch", "roberta", "text-classification", "nl", "transformers", "license:mit" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
33
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 273.05 +/- 17.96 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
CLTL/icf-levels-etn
[ "pytorch", "roberta", "text-classification", "nl", "transformers", "license:mit" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
31
null
A tiny version of Zipformer-Transducer (https://github.com/k2-fsa/icefall/tree/master/egs/librispeech/ASR/pruned_transducer_stateless7). Number of model parameters: 20697573 Decoding results at epoch-30-avg-9: * greedy_search: 2.67 & 6.4 * modified_beam_search: 2.6 & 6.26 * fast_beam_search: 2.64 & 6.3
CLTL/icf-levels-fac
[ "pytorch", "roberta", "text-classification", "nl", "transformers", "license:mit" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
32
2023-01-28T01:48:37Z
--- license: cc-by-sa-4.0 --- Abstract Expression Machine 30 (AEM30): Stable Diffusion 2 from Stability AI fine-tuned 30 steps on [maximalmargin/mitchell](https://huggingface.co/datasets/maximalmargin/mitchell) dataset.
CLTL/icf-levels-ins
[ "pytorch", "roberta", "text-classification", "nl", "transformers", "license:mit" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
32
2023-01-28T01:48:47Z
--- license: cc-by-sa-4.0 --- Abstract Expression Machine 100 (AEM100): Stable Diffusion 2 from Stability AI fine-tuned 100 steps on [maximalmargin/mitchell](https://huggingface.co/datasets/maximalmargin/mitchell) dataset.
CSResearcher/TestModel
[ "license:mit" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-01-28T03:16:19Z
--- language: en thumbnail: http://www.huggingtweets.com/aneternalenigma/1674876241213/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1610259562988294147/Ck8uDVHJ_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">🔴LIVE on TWITCH!🔴AnEternalEnigma</div> <div style="text-align: center; font-size: 14px;">@aneternalenigma</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from 🔴LIVE on TWITCH!🔴AnEternalEnigma. | Data | 🔴LIVE on TWITCH!🔴AnEternalEnigma | | --- | --- | | Tweets downloaded | 3235 | | Retweets | 773 | | Short tweets | 320 | | Tweets kept | 2142 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/mw1vu54f/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @aneternalenigma's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/cu1opgto) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/cu1opgto/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/aneternalenigma') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
CSZay/bart
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: en thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1597709018142855170/e0xfVtT4_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">silly little time</div> <div style="text-align: center; font-size: 14px;">@muzhroommama</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from silly little time. | Data | silly little time | | --- | --- | | Tweets downloaded | 236 | | Retweets | 87 | | Short tweets | 32 | | Tweets kept | 117 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/xaynl4xc/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @muzhroommama's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/x523rtvl) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/x523rtvl/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/muzhroommama') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
CZWin32768/xlm-align
[ "pytorch", "xlm-roberta", "fill-mask", "arxiv:2106.06381", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
Access to model shaoncsecu/en_Disease_A_Z_SpaCy is restricted and you are not in the authorized list. Visit https://huggingface.co/shaoncsecu/en_Disease_A_Z_SpaCy to ask for access.
Callidior/bert2bert-base-arxiv-titlegen
[ "pytorch", "safetensors", "encoder-decoder", "text2text-generation", "en", "dataset:arxiv_dataset", "transformers", "summarization", "license:apache-2.0", "autotrain_compatible", "has_space" ]
summarization
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
145
null
--- tags: - conversational --- # 42meow DialoGPT Model
CallumRai/HansardGPT2
[ "pytorch", "jax", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
--- tags: - ultralyticsplus - yolov8 - ultralytics - yolo - vision - image-classification - pytorch - awesome-yolov8-models library_name: ultralytics library_version: 8.0.23 inference: false datasets: - keremberke/chest-xray-classification model-index: - name: keremberke/yolov8m-chest-xray-classification results: - task: type: image-classification dataset: type: keremberke/chest-xray-classification name: chest-xray-classification split: validation metrics: - type: accuracy value: 0.95533 # min: 0.0 - max: 1.0 name: top1 accuracy - type: accuracy value: 1 # min: 0.0 - max: 1.0 name: top5 accuracy --- <div align="center"> <img width="640" alt="keremberke/yolov8m-chest-xray-classification" src="https://huggingface.co/keremberke/yolov8m-chest-xray-classification/resolve/main/thumbnail.jpg"> </div> ### Supported Labels ``` ['NORMAL', 'PNEUMONIA'] ``` ### How to use - Install [ultralyticsplus](https://github.com/fcakyon/ultralyticsplus): ```bash pip install ultralyticsplus==0.0.24 ultralytics==8.0.23 ``` - Load model and perform prediction: ```python from ultralyticsplus import YOLO, postprocess_classify_output # load model model = YOLO('keremberke/yolov8m-chest-xray-classification') # set model parameters model.overrides['conf'] = 0.25 # model confidence threshold # set image image = 'https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg' # perform inference results = model.predict(image) # observe results print(results[0].probs) # [0.1, 0.2, 0.3, 0.4] processed_result = postprocess_classify_output(model, result=results[0]) print(processed_result) # {"cat": 0.4, "dog": 0.6} ``` **More models available at: [awesome-yolov8-models](https://yolov8.xyz)**
Cameron/BERT-Jigsaw
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
35
null
--- tags: - ultralyticsplus - yolov8 - ultralytics - yolo - vision - image-classification - pytorch - awesome-yolov8-models library_name: ultralytics library_version: 8.0.21 inference: false datasets: - keremberke/pokemon-classification model-index: - name: keremberke/yolov8n-pokemon-classification results: - task: type: image-classification dataset: type: keremberke/pokemon-classification name: pokemon-classification split: validation metrics: - type: accuracy value: 0.02322 # min: 0.0 - max: 1.0 name: top1 accuracy - type: accuracy value: 0.09016 # min: 0.0 - max: 1.0 name: top5 accuracy --- <div align="center"> <img width="640" alt="keremberke/yolov8n-pokemon-classification" src="https://huggingface.co/keremberke/yolov8n-pokemon-classification/resolve/main/thumbnail.jpg"> </div> ### Supported Labels ``` ['Abra', 'Aerodactyl', 'Alakazam', 'Alolan Sandslash', 'Arbok', 'Arcanine', 'Articuno', 'Beedrill', 'Bellsprout', 'Blastoise', 'Bulbasaur', 'Butterfree', 'Caterpie', 'Chansey', 'Charizard', 'Charmander', 'Charmeleon', 'Clefable', 'Clefairy', 'Cloyster', 'Cubone', 'Dewgong', 'Diglett', 'Ditto', 'Dodrio', 'Doduo', 'Dragonair', 'Dragonite', 'Dratini', 'Drowzee', 'Dugtrio', 'Eevee', 'Ekans', 'Electabuzz', 'Electrode', 'Exeggcute', 'Exeggutor', 'Farfetchd', 'Fearow', 'Flareon', 'Gastly', 'Gengar', 'Geodude', 'Gloom', 'Golbat', 'Goldeen', 'Golduck', 'Golem', 'Graveler', 'Grimer', 'Growlithe', 'Gyarados', 'Haunter', 'Hitmonchan', 'Hitmonlee', 'Horsea', 'Hypno', 'Ivysaur', 'Jigglypuff', 'Jolteon', 'Jynx', 'Kabuto', 'Kabutops', 'Kadabra', 'Kakuna', 'Kangaskhan', 'Kingler', 'Koffing', 'Krabby', 'Lapras', 'Lickitung', 'Machamp', 'Machoke', 'Machop', 'Magikarp', 'Magmar', 'Magnemite', 'Magneton', 'Mankey', 'Marowak', 'Meowth', 'Metapod', 'Mew', 'Mewtwo', 'Moltres', 'MrMime', 'Muk', 'Nidoking', 'Nidoqueen', 'Nidorina', 'Nidorino', 'Ninetales', 'Oddish', 'Omanyte', 'Omastar', 'Onix', 'Paras', 'Parasect', 'Persian', 'Pidgeot', 'Pidgeotto', 'Pidgey', 'Pikachu', 'Pinsir', 'Poliwag', 'Poliwhirl', 'Poliwrath', 'Wigglytuff', 'Zapdos', 'Zubat'] ``` ### How to use - Install [ultralyticsplus](https://github.com/fcakyon/ultralyticsplus): ```bash pip install ultralyticsplus==0.0.23 ultralytics==8.0.21 ``` - Load model and perform prediction: ```python from ultralyticsplus import YOLO, postprocess_classify_output # load model model = YOLO('keremberke/yolov8n-pokemon-classification') # set model parameters model.overrides['conf'] = 0.25 # model confidence threshold # set image image = 'https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg' # perform inference results = model.predict(image) # observe results print(results[0].probs) # [0.1, 0.2, 0.3, 0.4] processed_result = postprocess_classify_output(model, result=results[0]) print(processed_result) # {"cat": 0.4, "dog": 0.6} ``` **More models available at: [awesome-yolov8-models](https://yolov8.xyz)**
Cameron/BERT-SBIC-offensive
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
31
null
--- language: en thumbnail: http://www.huggingtweets.com/rhilever/1674878721821/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1611525483770044416/fYPREQ1N_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Rhilever</div> <div style="text-align: center; font-size: 14px;">@rhilever</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Rhilever. | Data | Rhilever | | --- | --- | | Tweets downloaded | 2728 | | Retweets | 326 | | Short tweets | 402 | | Tweets kept | 2000 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/imxxkyr1/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @rhilever's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/bvkpy3yi) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/bvkpy3yi/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/rhilever') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
Cameron/BERT-SBIC-targetcategory
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
null
--- license: creativeml-openrail-m base_model: CompVis/stable-diffusion-v1-4 tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers inference: true --- # KerasCV Stable Diffusion in Diffusers 🧨🤗 The pipeline contained in this repository was created using [this Space](https://huggingface.co/spaces/sayakpaul/convert-kerascv-sd-diffusers). The purpose is to convert the KerasCV Stable Diffusion weights in a way that is compatible with [Diffusers](https://github.com/huggingface/diffusers). This allows users to fine-tune using KerasCV and use the fine-tuned weights in Diffusers taking advantage of its nifty features (like [schedulers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/schedulers), [fast attention](https://huggingface.co/docs/diffusers/optimization/fp16), etc.). Following weight paths (KerasCV) were used : ['https://huggingface.co/sayakpaul/dreambooth-keras-dogs-unet/resolve/main/lr_1e-6_steps_1000.h5']
Cameron/BERT-eec-emotion
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
36
2023-01-28T04:19:19Z
--- language: en thumbnail: http://www.huggingtweets.com/maxylobes/1674880251172/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1618233941110173696/x6aWoIH3_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Maxy</div> <div style="text-align: center; font-size: 14px;">@maxylobes</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Maxy. | Data | Maxy | | --- | --- | | Tweets downloaded | 3245 | | Retweets | 236 | | Short tweets | 142 | | Tweets kept | 2867 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/xfg543v2/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @maxylobes's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/ge6894ny) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/ge6894ny/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/maxylobes') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
Cameron/BERT-jigsaw-severetoxic
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
2023-01-28T04:23:35Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imagefolder metrics: - accuracy model-index: - name: swin-tiny-patch4-window7-224-finetuned-eurosat results: - task: name: Image Classification type: image-classification dataset: name: imagefolder type: imagefolder config: default split: train args: default metrics: - name: Accuracy type: accuracy value: 0.9822222222222222 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-finetuned-eurosat This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.0583 - Accuracy: 0.9822 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.2655 | 1.0 | 190 | 0.1039 | 0.9707 | | 0.1519 | 2.0 | 380 | 0.0866 | 0.9715 | | 0.1402 | 3.0 | 570 | 0.0583 | 0.9822 | ### Framework versions - Transformers 4.26.0 - Pytorch 1.13.1+cu117 - Datasets 2.9.0 - Tokenizers 0.13.2
Cameron/BERT-mdgender-convai-binary
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
33
2023-01-28T04:36:59Z
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python model = load_from_hub(repo_id="shivr/FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
Cameron/BERT-mdgender-wizard
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
2023-01-28T04:48:41Z
--- tags: - ultralyticsplus - yolov8 - ultralytics - yolo - vision - image-classification - pytorch - awesome-yolov8-models library_name: ultralytics library_version: 8.0.21 inference: false datasets: - keremberke/pokemon-classification model-index: - name: keremberke/yolov8s-pokemon-classification results: - task: type: image-classification dataset: type: keremberke/pokemon-classification name: pokemon-classification split: validation metrics: - type: accuracy value: 0.02459 # min: 0.0 - max: 1.0 name: top1 accuracy - type: accuracy value: 0.0806 # min: 0.0 - max: 1.0 name: top5 accuracy --- <div align="center"> <img width="640" alt="keremberke/yolov8s-pokemon-classification" src="https://huggingface.co/keremberke/yolov8s-pokemon-classification/resolve/main/thumbnail.jpg"> </div> ### Supported Labels ``` ['Abra', 'Aerodactyl', 'Alakazam', 'Alolan Sandslash', 'Arbok', 'Arcanine', 'Articuno', 'Beedrill', 'Bellsprout', 'Blastoise', 'Bulbasaur', 'Butterfree', 'Caterpie', 'Chansey', 'Charizard', 'Charmander', 'Charmeleon', 'Clefable', 'Clefairy', 'Cloyster', 'Cubone', 'Dewgong', 'Diglett', 'Ditto', 'Dodrio', 'Doduo', 'Dragonair', 'Dragonite', 'Dratini', 'Drowzee', 'Dugtrio', 'Eevee', 'Ekans', 'Electabuzz', 'Electrode', 'Exeggcute', 'Exeggutor', 'Farfetchd', 'Fearow', 'Flareon', 'Gastly', 'Gengar', 'Geodude', 'Gloom', 'Golbat', 'Goldeen', 'Golduck', 'Golem', 'Graveler', 'Grimer', 'Growlithe', 'Gyarados', 'Haunter', 'Hitmonchan', 'Hitmonlee', 'Horsea', 'Hypno', 'Ivysaur', 'Jigglypuff', 'Jolteon', 'Jynx', 'Kabuto', 'Kabutops', 'Kadabra', 'Kakuna', 'Kangaskhan', 'Kingler', 'Koffing', 'Krabby', 'Lapras', 'Lickitung', 'Machamp', 'Machoke', 'Machop', 'Magikarp', 'Magmar', 'Magnemite', 'Magneton', 'Mankey', 'Marowak', 'Meowth', 'Metapod', 'Mew', 'Mewtwo', 'Moltres', 'MrMime', 'Muk', 'Nidoking', 'Nidoqueen', 'Nidorina', 'Nidorino', 'Ninetales', 'Oddish', 'Omanyte', 'Omastar', 'Onix', 'Paras', 'Parasect', 'Persian', 'Pidgeot', 'Pidgeotto', 'Pidgey', 'Pikachu', 'Pinsir', 'Poliwag', 'Poliwhirl', 'Poliwrath', 'Wigglytuff', 'Zapdos', 'Zubat'] ``` ### How to use - Install [ultralyticsplus](https://github.com/fcakyon/ultralyticsplus): ```bash pip install ultralyticsplus==0.0.23 ultralytics==8.0.21 ``` - Load model and perform prediction: ```python from ultralyticsplus import YOLO, postprocess_classify_output # load model model = YOLO('keremberke/yolov8s-pokemon-classification') # set model parameters model.overrides['conf'] = 0.25 # model confidence threshold # set image image = 'https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg' # perform inference results = model.predict(image) # observe results print(results[0].probs) # [0.1, 0.2, 0.3, 0.4] processed_result = postprocess_classify_output(model, result=results[0]) print(processed_result) # {"cat": 0.4, "dog": 0.6} ``` **More models available at: [awesome-yolov8-models](https://yolov8.xyz)**
Cameron/BERT-rtgender-opgender-annotations
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
33
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 249.56 +/- 19.30 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
Camzure/MaamiBot
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-01-28T05:02:37Z
--- tags: - ultralyticsplus - yolov8 - ultralytics - yolo - vision - image-classification - pytorch - awesome-yolov8-models library_name: ultralytics library_version: 8.0.23 inference: false datasets: - keremberke/pokemon-classification model-index: - name: keremberke/yolov8m-pokemon-classification results: - task: type: image-classification dataset: type: keremberke/pokemon-classification name: pokemon-classification split: validation metrics: - type: accuracy value: 0.03279 # min: 0.0 - max: 1.0 name: top1 accuracy - type: accuracy value: 0.09699 # min: 0.0 - max: 1.0 name: top5 accuracy --- <div align="center"> <img width="640" alt="keremberke/yolov8m-pokemon-classification" src="https://huggingface.co/keremberke/yolov8m-pokemon-classification/resolve/main/thumbnail.jpg"> </div> ### Supported Labels ``` ['Abra', 'Aerodactyl', 'Alakazam', 'Alolan Sandslash', 'Arbok', 'Arcanine', 'Articuno', 'Beedrill', 'Bellsprout', 'Blastoise', 'Bulbasaur', 'Butterfree', 'Caterpie', 'Chansey', 'Charizard', 'Charmander', 'Charmeleon', 'Clefable', 'Clefairy', 'Cloyster', 'Cubone', 'Dewgong', 'Diglett', 'Ditto', 'Dodrio', 'Doduo', 'Dragonair', 'Dragonite', 'Dratini', 'Drowzee', 'Dugtrio', 'Eevee', 'Ekans', 'Electabuzz', 'Electrode', 'Exeggcute', 'Exeggutor', 'Farfetchd', 'Fearow', 'Flareon', 'Gastly', 'Gengar', 'Geodude', 'Gloom', 'Golbat', 'Goldeen', 'Golduck', 'Golem', 'Graveler', 'Grimer', 'Growlithe', 'Gyarados', 'Haunter', 'Hitmonchan', 'Hitmonlee', 'Horsea', 'Hypno', 'Ivysaur', 'Jigglypuff', 'Jolteon', 'Jynx', 'Kabuto', 'Kabutops', 'Kadabra', 'Kakuna', 'Kangaskhan', 'Kingler', 'Koffing', 'Krabby', 'Lapras', 'Lickitung', 'Machamp', 'Machoke', 'Machop', 'Magikarp', 'Magmar', 'Magnemite', 'Magneton', 'Mankey', 'Marowak', 'Meowth', 'Metapod', 'Mew', 'Mewtwo', 'Moltres', 'MrMime', 'Muk', 'Nidoking', 'Nidoqueen', 'Nidorina', 'Nidorino', 'Ninetales', 'Oddish', 'Omanyte', 'Omastar', 'Onix', 'Paras', 'Parasect', 'Persian', 'Pidgeot', 'Pidgeotto', 'Pidgey', 'Pikachu', 'Pinsir', 'Poliwag', 'Poliwhirl', 'Poliwrath', 'Wigglytuff', 'Zapdos', 'Zubat'] ``` ### How to use - Install [ultralyticsplus](https://github.com/fcakyon/ultralyticsplus): ```bash pip install ultralyticsplus==0.0.24 ultralytics==8.0.23 ``` - Load model and perform prediction: ```python from ultralyticsplus import YOLO, postprocess_classify_output # load model model = YOLO('keremberke/yolov8m-pokemon-classification') # set model parameters model.overrides['conf'] = 0.25 # model confidence threshold # set image image = 'https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg' # perform inference results = model.predict(image) # observe results print(results[0].probs) # [0.1, 0.2, 0.3, 0.4] processed_result = postprocess_classify_output(model, result=results[0]) print(processed_result) # {"cat": 0.4, "dog": 0.6} ``` **More models available at: [awesome-yolov8-models](https://yolov8.xyz)**
Canadiancaleb/DialoGPT-small-jesse
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- license: apache-2.0 --- Novel-writing model https://github.com/BlinkDL/AI-Writer/releases
Canadiancaleb/jessebot
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-01-28T05:25:57Z
--- license: other tags: - generated_from_keras_callback model-index: - name: dousey/scene_segmentation results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # dousey/scene_segmentation This model is a fine-tuned version of [nvidia/mit-b0](https://huggingface.co/nvidia/mit-b0) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: nan - Validation Loss: nan - Validation Mean Iou: 0.0217 - Validation Mean Accuracy: 0.5 - Validation Overall Accuracy: 0.2545 - Validation Accuracy Background: 1.0 - Validation Accuracy Bleuet: 0.0 - Validation Accuracy Comptonie: nan - Validation Accuracy Kalmia: nan - Validation Iou Background: 0.0433 - Validation Iou Bleuet: 0.0 - Validation Iou Comptonie: nan - Validation Iou Kalmia: nan - Epoch: 1 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 6e-05, 'decay_steps': 76500, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Validation Mean Iou | Validation Mean Accuracy | Validation Overall Accuracy | Validation Accuracy Background | Validation Accuracy Bleuet | Validation Accuracy Comptonie | Validation Accuracy Kalmia | Validation Iou Background | Validation Iou Bleuet | Validation Iou Comptonie | Validation Iou Kalmia | Epoch | |:----------:|:---------------:|:-------------------:|:------------------------:|:---------------------------:|:------------------------------:|:--------------------------:|:-----------------------------:|:--------------------------:|:-------------------------:|:---------------------:|:------------------------:|:---------------------:|:-----:| | nan | nan | 0.0217 | 0.5 | 0.2545 | 1.0 | 0.0 | nan | nan | 0.0433 | 0.0 | nan | nan | 0 | | nan | nan | 0.0217 | 0.5 | 0.2545 | 1.0 | 0.0 | nan | nan | 0.0433 | 0.0 | nan | nan | 1 | ### Framework versions - Transformers 4.26.0 - TensorFlow 2.9.2 - Datasets 2.9.0 - Tokenizers 0.13.2
Capreolus/birch-bert-large-mb
[ "pytorch", "tf", "jax", "bert", "next-sentence-prediction", "transformers" ]
null
{ "architectures": [ "BertForNextSentencePrediction" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: piyusharma/bert-base-uncased-finetuned-lex results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # piyusharma/bert-base-uncased-finetuned-lex This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.2112 - Epoch: 0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': 2e-05, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: float32 ### Training results | Train Loss | Epoch | |:----------:|:-----:| | 0.2112 | 0 | ### Framework versions - Transformers 4.26.0 - TensorFlow 2.9.2 - Datasets 2.9.0 - Tokenizers 0.13.2
Capreolus/birch-bert-large-msmarco_mb
[ "pytorch", "tf", "jax", "bert", "next-sentence-prediction", "transformers" ]
null
{ "architectures": [ "BertForNextSentencePrediction" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- library_name: stable-baselines3 tags: - PandaReachDense-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: PandaReachDense-v2 type: PandaReachDense-v2 metrics: - type: mean_reward value: -1.00 +/- 0.25 name: mean_reward verified: false --- # **A2C** Agent playing **PandaReachDense-v2** This is a trained model of a **A2C** agent playing **PandaReachDense-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
Captain-1337/CrudeBERT
[ "pytorch", "bert", "text-classification", "arxiv:1908.10063", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
28
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: model1_absa_cont results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # model1_absa_cont This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.2382 - Precision: 0.2445 - Recall: 0.3046 - F1: 0.2712 - Accuracy: 0.5420 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 7 | 1.2382 | 0.2445 | 0.3046 | 0.2712 | 0.5420 | | No log | 2.0 | 14 | 1.2382 | 0.2445 | 0.3046 | 0.2712 | 0.5420 | | No log | 3.0 | 21 | 1.2382 | 0.2445 | 0.3046 | 0.2712 | 0.5420 | | No log | 4.0 | 28 | 1.2382 | 0.2445 | 0.3046 | 0.2712 | 0.5420 | ### Framework versions - Transformers 4.26.0 - Pytorch 1.13.1+cu116 - Datasets 2.9.0 - Tokenizers 0.13.2
Carlork314/Carlos
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers datasets: - Ramos-Ramos/nllb-eng-tgl-12k --- # Ramos-Ramos/xlm-roberta-base-en-tl This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('Ramos-Ramos/xlm-roberta-base-en-tl') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('Ramos-Ramos/xlm-roberta-base-en-tl') model = AutoModel.from_pretrained('Ramos-Ramos/xlm-roberta-base-en-tl') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=Ramos-Ramos/xlm-roberta-base-en-tl) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 308 with parameters: ``` {'batch_size': 64, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.MSELoss.MSELoss` Parameters of the fit()-Method: ``` { "epochs": 5, "evaluation_steps": 0, "evaluator": "sentence_transformers.evaluation.SequentialEvaluator.SequentialEvaluator", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "eps": 1e-06, "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 200, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: XLMRobertaModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
Carlork314/Xd
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - ultralyticsplus - yolov8 - ultralytics - yolo - vision - image-segmentation - pytorch - awesome-yolov8-models library_name: ultralytics library_version: 8.0.21 inference: false datasets: - keremberke/pcb-defect-segmentation model-index: - name: keremberke/yolov8n-pcb-defect-segmentation results: - task: type: image-segmentation dataset: type: keremberke/pcb-defect-segmentation name: pcb-defect-segmentation split: validation metrics: - type: precision # since [email protected] is not available on hf.co/metrics value: 0.51186 # min: 0.0 - max: 1.0 name: [email protected](box) - type: precision # since [email protected] is not available on hf.co/metrics value: 0.51667 # min: 0.0 - max: 1.0 name: [email protected](mask) --- <div align="center"> <img width="640" alt="keremberke/yolov8n-pcb-defect-segmentation" src="https://huggingface.co/keremberke/yolov8n-pcb-defect-segmentation/resolve/main/thumbnail.jpg"> </div> ### Supported Labels ``` ['Dry_joint', 'Incorrect_installation', 'PCB_damage', 'Short_circuit'] ``` ### How to use - Install [ultralyticsplus](https://github.com/fcakyon/ultralyticsplus): ```bash pip install ultralyticsplus==0.0.23 ultralytics==8.0.21 ``` - Load model and perform prediction: ```python from ultralyticsplus import YOLO, render_result # load model model = YOLO('keremberke/yolov8n-pcb-defect-segmentation') # set model parameters model.overrides['conf'] = 0.25 # NMS confidence threshold model.overrides['iou'] = 0.45 # NMS IoU threshold model.overrides['agnostic_nms'] = False # NMS class-agnostic model.overrides['max_det'] = 1000 # maximum number of detections per image # set image image = 'https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg' # perform inference results = model.predict(image) # observe results print(results[0].boxes) print(results[0].masks) render = render_result(model=model, image=image, result=results[0]) render.show() ``` **More models available at: [awesome-yolov8-models](https://yolov8.xyz)**
Carolhuehuehuehue/Sla
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional image generation of cute 🦋. ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('garg-aayush/sd-class-butterflies-32') image = pipeline().images[0] image ```
Cat/Kitty
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: creativeml-openrail-m tags: - text-to-image widget: - text: julcto --- ### xin-dreambooth-huggingface Dreambooth model trained by WildWill with [Hugging Face Dreambooth Training Space](https://huggingface.co/spaces/multimodalart/dreambooth-training) with the v1-5 base model You run your new concept via `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb). Don't forget to use the concept prompts! Sample pictures of: julcto (use that on your prompt) ![julcto 0](https://huggingface.co/WildWill/xin-dreambooth-huggingface/resolve/main/concept_images/julcto_%281%29.jpg)![julcto 1](https://huggingface.co/WildWill/xin-dreambooth-huggingface/resolve/main/concept_images/julcto_%282%29.jpg)![julcto 2](https://huggingface.co/WildWill/xin-dreambooth-huggingface/resolve/main/concept_images/julcto_%283%29.jpg)![julcto 3](https://huggingface.co/WildWill/xin-dreambooth-huggingface/resolve/main/concept_images/julcto_%284%29.jpg)![julcto 4](https://huggingface.co/WildWill/xin-dreambooth-huggingface/resolve/main/concept_images/julcto_%285%29.jpg)![julcto 5](https://huggingface.co/WildWill/xin-dreambooth-huggingface/resolve/main/concept_images/julcto_%286%29.jpg)![julcto 6](https://huggingface.co/WildWill/xin-dreambooth-huggingface/resolve/main/concept_images/julcto_%287%29.jpg)![julcto 7](https://huggingface.co/WildWill/xin-dreambooth-huggingface/resolve/main/concept_images/julcto_%288%29.jpg)![julcto 8](https://huggingface.co/WildWill/xin-dreambooth-huggingface/resolve/main/concept_images/julcto_%289%29.jpg)![julcto 9](https://huggingface.co/WildWill/xin-dreambooth-huggingface/resolve/main/concept_images/julcto_%2810%29.jpg)
Cathy/reranking_model
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
--- tags: - generated_from_trainer model-index: - name: fine_tuned_beyonce results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # fine_tuned_beyonce This model was trained from scratch on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results ### Framework versions - Transformers 4.26.0 - Pytorch 1.13.1+cu116 - Datasets 2.9.0 - Tokenizers 0.13.2
Cdial/hausa-asr
[ "wav2vec2", "automatic-speech-recognition", "ha", "dataset:mozilla-foundation/common_voice_8_0", "transformers", "mozilla-foundation/common_voice_8_0", "generated_from_trainer", "robust-speech-event", "model_for_talk", "hf-asr-leaderboard", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional image generation of cute 🦋. ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('garg-aayush/sd-class-butterflies-64') image = pipeline().images[0] image ```
dccuchile/albert-base-spanish-finetuned-pos
[ "pytorch", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- tags: - ultralyticsplus - yolov8 - ultralytics - yolo - vision - image-segmentation - pytorch - awesome-yolov8-models library_name: ultralytics library_version: 8.0.21 inference: false datasets: - keremberke/pcb-defect-segmentation model-index: - name: keremberke/yolov8s-pcb-defect-segmentation results: - task: type: image-segmentation dataset: type: keremberke/pcb-defect-segmentation name: pcb-defect-segmentation split: validation metrics: - type: precision # since [email protected] is not available on hf.co/metrics value: 0.51452 # min: 0.0 - max: 1.0 name: [email protected](box) - type: precision # since [email protected] is not available on hf.co/metrics value: 0.49054 # min: 0.0 - max: 1.0 name: [email protected](mask) --- <div align="center"> <img width="640" alt="keremberke/yolov8s-pcb-defect-segmentation" src="https://huggingface.co/keremberke/yolov8s-pcb-defect-segmentation/resolve/main/thumbnail.jpg"> </div> ### Supported Labels ``` ['Dry_joint', 'Incorrect_installation', 'PCB_damage', 'Short_circuit'] ``` ### How to use - Install [ultralyticsplus](https://github.com/fcakyon/ultralyticsplus): ```bash pip install ultralyticsplus==0.0.23 ultralytics==8.0.21 ``` - Load model and perform prediction: ```python from ultralyticsplus import YOLO, render_result # load model model = YOLO('keremberke/yolov8s-pcb-defect-segmentation') # set model parameters model.overrides['conf'] = 0.25 # NMS confidence threshold model.overrides['iou'] = 0.45 # NMS IoU threshold model.overrides['agnostic_nms'] = False # NMS class-agnostic model.overrides['max_det'] = 1000 # maximum number of detections per image # set image image = 'https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg' # perform inference results = model.predict(image) # observe results print(results[0].boxes) print(results[0].masks) render = render_result(model=model, image=image, result=results[0]) render.show() ``` **More models available at: [awesome-yolov8-models](https://yolov8.xyz)**
dccuchile/albert-base-spanish-finetuned-qa-mlqa
[ "pytorch", "albert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "AlbertForQuestionAnswering" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
2023-01-28T07:54:47Z
--- license: cc-by-4.0 tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: finetuned-ner results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuned-ner This model is a fine-tuned version of [deepset/deberta-v3-base-squad2](https://huggingface.co/deepset/deberta-v3-base-squad2) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.4783 - Precision: 0.3264 - Recall: 0.3591 - F1: 0.3420 - Accuracy: 0.8925 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 4 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 30 - mixed_precision_training: Native AMP - label_smoothing_factor: 0.05 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:---------:|:------:|:------:|:--------:| | 39.8167 | 1.0 | 760 | 0.3957 | 0.1844 | 0.2909 | 0.2257 | 0.8499 | | 21.7333 | 2.0 | 1520 | 0.3853 | 0.2118 | 0.3273 | 0.2571 | 0.8546 | | 13.8859 | 3.0 | 2280 | 0.3631 | 0.2443 | 0.2909 | 0.2656 | 0.8789 | | 20.6586 | 4.0 | 3040 | 0.3961 | 0.2946 | 0.3455 | 0.3180 | 0.8753 | | 13.8654 | 5.0 | 3800 | 0.3821 | 0.2791 | 0.3273 | 0.3013 | 0.8877 | | 12.6942 | 6.0 | 4560 | 0.4393 | 0.3122 | 0.3364 | 0.3239 | 0.8909 | | 25.0549 | 7.0 | 5320 | 0.4542 | 0.3106 | 0.3727 | 0.3388 | 0.8824 | | 5.6816 | 8.0 | 6080 | 0.4432 | 0.2820 | 0.3409 | 0.3086 | 0.8774 | | 13.1296 | 9.0 | 6840 | 0.4509 | 0.2884 | 0.35 | 0.3162 | 0.8824 | | 7.7173 | 10.0 | 7600 | 0.4265 | 0.3170 | 0.3818 | 0.3464 | 0.8919 | | 6.7922 | 11.0 | 8360 | 0.4749 | 0.3320 | 0.3818 | 0.3552 | 0.8892 | | 5.4287 | 12.0 | 9120 | 0.4564 | 0.2917 | 0.3818 | 0.3307 | 0.8805 | | 7.4153 | 13.0 | 9880 | 0.4735 | 0.2963 | 0.3273 | 0.3110 | 0.8871 | | 9.1154 | 14.0 | 10640 | 0.4553 | 0.3416 | 0.3773 | 0.3585 | 0.8894 | | 5.999 | 15.0 | 11400 | 0.4489 | 0.3203 | 0.4091 | 0.3593 | 0.8880 | | 9.5128 | 16.0 | 12160 | 0.4947 | 0.3164 | 0.3682 | 0.3403 | 0.8883 | | 5.6713 | 17.0 | 12920 | 0.4705 | 0.3527 | 0.3864 | 0.3688 | 0.8919 | | 12.2119 | 18.0 | 13680 | 0.4617 | 0.3123 | 0.3591 | 0.3340 | 0.8857 | | 8.5658 | 19.0 | 14440 | 0.4764 | 0.3092 | 0.35 | 0.3284 | 0.8944 | | 11.0664 | 20.0 | 15200 | 0.4557 | 0.3187 | 0.3636 | 0.3397 | 0.8905 | | 6.7161 | 21.0 | 15960 | 0.4468 | 0.3210 | 0.3955 | 0.3544 | 0.8956 | | 9.0448 | 22.0 | 16720 | 0.5120 | 0.2872 | 0.3682 | 0.3227 | 0.8792 | | 6.573 | 23.0 | 17480 | 0.4990 | 0.3307 | 0.3773 | 0.3524 | 0.8869 | | 5.0543 | 24.0 | 18240 | 0.4763 | 0.3028 | 0.3455 | 0.3227 | 0.8899 | | 6.8797 | 25.0 | 19000 | 0.4814 | 0.2780 | 0.3273 | 0.3006 | 0.8913 | | 7.7544 | 26.0 | 19760 | 0.4695 | 0.3024 | 0.3409 | 0.3205 | 0.8946 | | 4.8346 | 27.0 | 20520 | 0.4849 | 0.3154 | 0.3455 | 0.3297 | 0.8931 | | 4.4766 | 28.0 | 21280 | 0.4809 | 0.2925 | 0.3364 | 0.3129 | 0.8913 | | 7.9149 | 29.0 | 22040 | 0.4756 | 0.3238 | 0.3591 | 0.3405 | 0.8930 | | 7.3033 | 30.0 | 22800 | 0.4783 | 0.3264 | 0.3591 | 
0.3420 | 0.8925 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.7.1 - Datasets 2.8.0 - Tokenizers 0.13.2
dccuchile/albert-large-spanish-finetuned-mldoc
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
--- license: cc-by-nc-sa-4.0 tags: - generated_from_trainer model-index: - name: gpt_16_5_3e-5_lp5_nb5 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # gpt_16_5_3e-5_lp5_nb5 This model is a fine-tuned version of [skt/kogpt2-base-v2](https://huggingface.co/skt/kogpt2-base-v2) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 3.9078 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 5.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 3.3245 | 0.38 | 1000 | 4.0176 | | 3.1222 | 0.76 | 2000 | 3.9845 | | 2.9992 | 1.13 | 3000 | 3.9635 | | 2.8843 | 1.51 | 4000 | 3.9377 | | 2.882 | 1.89 | 5000 | 3.9268 | | 2.7411 | 2.27 | 6000 | 3.9208 | | 2.7204 | 2.64 | 7000 | 3.9160 | | 2.7106 | 3.02 | 8000 | 3.9171 | | 2.5857 | 3.4 | 9000 | 3.9162 | | 2.5863 | 3.78 | 10000 | 3.9037 | | 2.5674 | 4.15 | 11000 | 3.9135 | | 2.4901 | 4.53 | 12000 | 3.9125 | | 2.505 | 4.91 | 13000 | 3.9078 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.9.0+cu102 - Datasets 2.8.0 - Tokenizers 0.13.2
dccuchile/albert-large-spanish-finetuned-ner
[ "pytorch", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-CartPole-v1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
dccuchile/albert-large-spanish-finetuned-xnli
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
null
--- tags: - ultralyticsplus - yolov8 - ultralytics - yolo - vision - image-segmentation - pytorch - awesome-yolov8-models library_name: ultralytics library_version: 8.0.23 inference: false datasets: - keremberke/pcb-defect-segmentation model-index: - name: keremberke/yolov8m-pcb-defect-segmentation results: - task: type: image-segmentation dataset: type: keremberke/pcb-defect-segmentation name: pcb-defect-segmentation split: validation metrics: - type: precision # since [email protected] is not available on hf.co/metrics value: 0.56836 # min: 0.0 - max: 1.0 name: [email protected](box) - type: precision # since [email protected] is not available on hf.co/metrics value: 0.5573 # min: 0.0 - max: 1.0 name: [email protected](mask) --- <div align="center"> <img width="640" alt="keremberke/yolov8m-pcb-defect-segmentation" src="https://huggingface.co/keremberke/yolov8m-pcb-defect-segmentation/resolve/main/thumbnail.jpg"> </div> ### Supported Labels ``` ['Dry_joint', 'Incorrect_installation', 'PCB_damage', 'Short_circuit'] ``` ### How to use - Install [ultralyticsplus](https://github.com/fcakyon/ultralyticsplus): ```bash pip install ultralyticsplus==0.0.24 ultralytics==8.0.23 ``` - Load model and perform prediction: ```python from ultralyticsplus import YOLO, render_result # load model model = YOLO('keremberke/yolov8m-pcb-defect-segmentation') # set model parameters model.overrides['conf'] = 0.25 # NMS confidence threshold model.overrides['iou'] = 0.45 # NMS IoU threshold model.overrides['agnostic_nms'] = False # NMS class-agnostic model.overrides['max_det'] = 1000 # maximum number of detections per image # set image image = 'https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg' # perform inference results = model.predict(image) # observe results print(results[0].boxes) print(results[0].masks) render = render_result(model=model, image=image, result=results[0]) render.show() ``` **More models available at: [awesome-yolov8-models](https://yolov8.xyz)**
dccuchile/albert-tiny-spanish-finetuned-mldoc
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
32
null
# Model description <!-- Provide a quick summary of what the model is/does. --> Fine-tuned gpt-2_Neo125M model for predicting house and apartment prices in Cali, Colombia. [Download all the required files from Dropbox](https://www.dropbox.com/scl/fo/4mj054l8eha31pcc5z245/h?dl=0&rlkey=q1s2pycl9hh65b3xvqiukh9g6). - **Developed by:** Nicolai Potes - **Language:** Python - **Finetuned from model:** [gpt-neo-125M](https://huggingface.co/EleutherAI/gpt-neo-125M). # Training Details ``` Num examples = 779 Num Epochs = 500 Instantaneous batch size per device = 80 Total train batch size (w. parallel, distributed & accumulation) = 80 Gradient Accumulation steps = 1 Total optimization steps = 5000 Number of trainable parameters = 125200128 ``` # Training Evaluation ``` {'eval_loss': 1.341125726699829, 'eval_runtime': 23.3347, 'eval_samples_per_second': 300.111, 'eval_steps_per_second': 3.771, 'epoch': 500.0} ``` ## Training Data Data scraped from https://www.metrocuadrado.com/ Format used for training the model: ``` 'meter: 3651685 \n area: 267 \n bathroom: 4 \n room: 4 \n property: 1 \n price: 975000000', 'meter: 3206498 \n area: 70 \n bathroom: 3 \n room: 4 \n property: 2 \n price: 225000000', 'meter: 2181818 \n area: 110 \n bathroom: 2 \n room: 3 \n property: 2 \n price: 240000000', 'meter: 5882352 \n area: 306 \n bathroom: 4 \n room: 4 \n property: 2 \n price: 1800000000', 'meter: 2827586 \n area: 58 \n bathroom: 2 \n room: 2 \n property: 2 \n price: 164000000', 'meter: 7382550 \n area: 149 \n bathroom: 4 \n room: 3 \n property: 2 \n price: 1100000000', 'meter: 2833333 \n area: 300 \n bathroom: 3 \n room: 3 \n property: 1 \n price: 850000000', 'meter: 3678474 \n area: 73 \n bathroom: 2 \n room: 3 \n property: 2 \n price: 270000000', 'meter: 2254901 \n area: 51 \n bathroom: 2 \n room: 2 \n property: 2 \n price: 115000000', 'meter: 2500000 \n area: 90 \n bathroom: 3 \n room: 3 \n property: 2 \n price: 225000000', 'meter: 4508196 \n area: 122 \n bathroom: 5 \n room: 4 \n property: 2 \n price: 550000000', 'meter: 3489583 \n area: 96 \n bathroom: 3 \n room: 3 \n property: 2 \n price: 335000000', 'meter: 2151898 \n area: 395 \n bathroom: 5 \n room: 5 \n property: 1 \n price: 850000000', ``` ### Hardware GPU ``` +-----------------------------------------------------------------------------+ | NVIDIA-SMI 510.47.03 Driver Version: 510.47.03 CUDA Version: 11.6 | |-------------------------------+----------------------+----------------------+ | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | | | | MIG M. 
| |===============================+======================+======================| | 0 Tesla T4 Off | 00000000:00:04.0 Off | 0 | | N/A 49C P0 29W / 70W | 0MiB / 15360MiB | 5% Default | | | | N/A | +-------------------------------+----------------------+----------------------+ +-----------------------------------------------------------------------------+ | Processes: | | GPU GI CI PID Type Process name GPU Memory | | ID ID Usage | |=============================================================================| | No running processes found | +-----------------------------------------------------------------------------+ ``` ### Required libraries ``` pip install transformers pip install torch ``` ### Python code to load the model and predict the value of a property (house/apartment) ```Python import pandas as pd import torch import transformers import re ''' # if running on Google Colab from google.colab import drive drive.mount('/content/drive') path="/content/drive/My Drive/DatosMetroCuadradoPrueba/" ''' path="[Path of the folder where you have the MODEL]" path_carga= path+"modeloEntrenadoPreciosCasasApartamentos" from transformers import GPT2Tokenizer, GPTNeoForCausalLM new_modelPredict = GPTNeoForCausalLM.from_pretrained(path_carga).cuda() tokenizer2 = GPT2Tokenizer.from_pretrained(path_carga) new_modelPredict.resize_token_embeddings(len(tokenizer2)) tipo_propiedad= 1 # 1: house, 2: apartment habitaciones= 5 baños= 5 area= 580 valor_inmueble= 1500000000 valorMetroCuadrado= int(valor_inmueble/area) propiedad = f"<|startoftext|>meter: {valorMetroCuadrado} \n area: {area} \n bathroom: {baños} \n room: {habitaciones} \n property: {tipo_propiedad} \n price:" print("Texto:",propiedad) generated = tokenizer2(propiedad, # <|pad|> return_tensors="pt").input_ids.cuda() sample_outputs = new_modelPredict.generate(generated, do_sample=True, top_k=50, max_length=100, num_beams=7, #3 top_p=1.65, temperature=.69, num_return_sequences=1, pad_token_id = 0) price= [] # for i, sample_output in enumerate(sample_outputs): text= tokenizer2.decode(sample_output, skip_special_tokens=True) num= text.split("\n")[-1].split("price: ")[1] try: num= re.sub(r'[^\d.]', '',num )#[0] price.append( num ) except: pass # pd.set_option('display.float_format', '{.2f}'.format) priceData2= pd.DataFrame(price,columns=['price']).astype(int) print(priceData2) ```
dccuchile/albert-tiny-spanish-finetuned-ner
[ "pytorch", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 854.00 +/- 253.18 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib Install the RL Zoo (with SB3 and SB3-Contrib): ```bash pip install rl_zoo3 ``` ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga slomek -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga slomek -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python -m rl_zoo3.train --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga slomek ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 100000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 100000), ('n_timesteps', 1000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ```
dccuchile/albert-tiny-spanish-finetuned-qa-mlqa
[ "pytorch", "albert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "AlbertForQuestionAnswering" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2023-01-28T08:44:49Z
--- tags: - ultralyticsplus - yolov8 - ultralytics - yolo - vision - object-detection - pytorch - awesome-yolov8-models library_name: ultralytics library_version: 8.0.21 inference: false datasets: - keremberke/valorant-object-detection model-index: - name: keremberke/yolov8n-valorant-detection results: - task: type: object-detection dataset: type: keremberke/valorant-object-detection name: valorant-object-detection split: validation metrics: - type: precision # since [email protected] is not available on hf.co/metrics value: 0.93688 # min: 0.0 - max: 1.0 name: [email protected](box) --- <div align="center"> <img width="640" alt="keremberke/yolov8n-valorant-detection" src="https://huggingface.co/keremberke/yolov8n-valorant-detection/resolve/main/thumbnail.jpg"> </div> ### Supported Labels ``` ['dropped spike', 'enemy', 'planted spike', 'teammate'] ``` ### How to use - Install [ultralyticsplus](https://github.com/fcakyon/ultralyticsplus): ```bash pip install ultralyticsplus==0.0.23 ultralytics==8.0.21 ``` - Load model and perform prediction: ```python from ultralyticsplus import YOLO, render_result # load model model = YOLO('keremberke/yolov8n-valorant-detection') # set model parameters model.overrides['conf'] = 0.25 # NMS confidence threshold model.overrides['iou'] = 0.45 # NMS IoU threshold model.overrides['agnostic_nms'] = False # NMS class-agnostic model.overrides['max_det'] = 1000 # maximum number of detections per image # set image image = 'https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg' # perform inference results = model.predict(image) # observe results print(results[0].boxes) render = render_result(model=model, image=image, result=results[0]) render.show() ``` **More models available at: [awesome-yolov8-models](https://yolov8.xyz)**
dccuchile/albert-xlarge-spanish-finetuned-mldoc
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
26
2023-01-28T08:46:00Z
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 481.00 +/- 176.15 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib Install the RL Zoo (with SB3 and SB3-Contrib): ```bash pip install rl_zoo3 ``` ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga Periramm -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga Periramm -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python -m rl_zoo3.train --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga Periramm ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 10000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 100000), ('n_timesteps', 1000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ```
dccuchile/albert-xxlarge-spanish-finetuned-ner
[ "pytorch", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
28
null
--- tags: - ultralyticsplus - yolov8 - ultralytics - yolo - vision - object-detection - pytorch - awesome-yolov8-models library_name: ultralytics library_version: 8.0.21 inference: false datasets: - keremberke/valorant-object-detection model-index: - name: keremberke/yolov8s-valorant-detection results: - task: type: object-detection dataset: type: keremberke/valorant-object-detection name: valorant-object-detection split: validation metrics: - type: precision # since [email protected] is not available on hf.co/metrics value: 0.97138 # min: 0.0 - max: 1.0 name: [email protected](box) --- <div align="center"> <img width="640" alt="keremberke/yolov8s-valorant-detection" src="https://huggingface.co/keremberke/yolov8s-valorant-detection/resolve/main/thumbnail.jpg"> </div> ### Supported Labels ``` ['dropped spike', 'enemy', 'planted spike', 'teammate'] ``` ### How to use - Install [ultralyticsplus](https://github.com/fcakyon/ultralyticsplus): ```bash pip install ultralyticsplus==0.0.23 ultralytics==8.0.21 ``` - Load model and perform prediction: ```python from ultralyticsplus import YOLO, render_result # load model model = YOLO('keremberke/yolov8s-valorant-detection') # set model parameters model.overrides['conf'] = 0.25 # NMS confidence threshold model.overrides['iou'] = 0.45 # NMS IoU threshold model.overrides['agnostic_nms'] = False # NMS class-agnostic model.overrides['max_det'] = 1000 # maximum number of detections per image # set image image = 'https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg' # perform inference results = model.predict(image) # observe results print(results[0].boxes) render = render_result(model=model, image=image, result=results[0]) render.show() ``` **More models available at: [awesome-yolov8-models](https://yolov8.xyz)**
dccuchile/albert-xxlarge-spanish-finetuned-pawsx
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
26
2023-01-28T09:45:11Z
--- language: - ja --- Prompt keyword: pirotess - pirotess, 1girl, solo, pointy ears, dark skin, dark-skinned female, elf, sword, weapon, breasts, long hair, dark elf, circlet, center opening, white hair ![Sample_image](https://huggingface.co/nanashisan/LoRa_pirotess/resolve/main/A1_sample.png)
dccuchile/albert-xxlarge-spanish-finetuned-pos
[ "pytorch", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python model = load_from_hub(repo_id="ThuyVuPhuong/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
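`load_from_hub` in the snippet above is a helper defined in the Deep RL course notebooks rather than a library import. A minimal sketch of such a helper plus a greedy rollout might look like the following; it assumes the pickle holds a dict with `"env_id"` and `"qtable"` keys (the `"qtable"` key name is an assumption) and the older `gym` reset/step API.

```python
import pickle

import gym
import numpy as np
from huggingface_hub import hf_hub_download


def load_from_hub(repo_id: str, filename: str) -> dict:
    """Download and unpickle the Q-learning model dict from the Hub (sketch of the course helper)."""
    pickle_path = hf_hub_download(repo_id=repo_id, filename=filename)
    with open(pickle_path, "rb") as f:
        return pickle.load(f)


model = load_from_hub(repo_id="ThuyVuPhuong/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")
env = gym.make(model["env_id"], is_slippery=False)

# Greedy rollout with the learned Q-table ("qtable" is an assumed key name).
state = env.reset()
done = False
while not done:
    action = int(np.argmax(model["qtable"][state]))
    state, reward, done, info = env.step(action)
env.close()
```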
dccuchile/albert-base-spanish
[ "pytorch", "tf", "albert", "pretraining", "es", "dataset:large_spanish_corpus", "transformers", "spanish", "OpenCENIA" ]
null
{ "architectures": [ "AlbertForPreTraining" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
586
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers datasets: - allenai/nllb --- # Ramos-Ramos/xlm-roberta-base-en-tl-0-1000 This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('Ramos-Ramos/xlm-roberta-base-en-tl-0-1000') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('Ramos-Ramos/xlm-roberta-base-en-tl-0-1000') model = AutoModel.from_pretrained('Ramos-Ramos/xlm-roberta-base-en-tl-0-1000') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=Ramos-Ramos/xlm-roberta-base-en-tl-0-1000) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 12406 with parameters: ``` {'batch_size': 64, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.MSELoss.MSELoss` Parameters of the fit()-Method: ``` { "epochs": 5, "evaluation_steps": 1000, "evaluator": "sentence_transformers.evaluation.SequentialEvaluator.SequentialEvaluator", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "eps": 1e-06, "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 10000, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: XLMRobertaModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
dccuchile/albert-xlarge-spanish
[ "pytorch", "tf", "albert", "pretraining", "es", "dataset:large_spanish_corpus", "transformers", "spanish", "OpenCENIA" ]
null
{ "architectures": [ "AlbertForPreTraining" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
91
2023-01-28T09:55:21Z
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-Taxi_v3 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.56 +/- 2.71 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python model = load_from_hub(repo_id="ThuyVuPhuong/q-Taxi_v3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
dccuchile/bert-base-spanish-wwm-cased-finetuned-qa-mlqa
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
2023-01-28T10:13:20Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 272.23 +/- 22.90 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
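The usage section above is left as a TODO. One possible sketch, assuming `huggingface_sb3` and a Box2D-enabled `gym` install, with a placeholder repo id and filename and the older `gym` step API returning a 4-tuple:

```python
import gym

from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# Placeholder repo id/filename - replace with the repository this card describes.
checkpoint = load_from_hub(repo_id="<user>/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)

# Play a single episode with the trained policy.
env = gym.make("LunarLander-v2")
obs = env.reset()
done = False
total_reward = 0.0
while not done:
    action, _states = model.predict(obs, deterministic=True)
    obs, reward, done, info = env.step(action)
    total_reward += reward
print(f"episode reward: {total_reward:.2f}")
env.close()
```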
dccuchile/distilbert-base-spanish-uncased-finetuned-mldoc
[ "pytorch", "distilbert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 251.19 +/- 17.89 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
dccuchile/distilbert-base-spanish-uncased-finetuned-pawsx
[ "pytorch", "distilbert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
null
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python model = load_from_hub(repo_id="dungtd2403/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
dccuchile/distilbert-base-spanish-uncased
[ "pytorch", "distilbert", "fill-mask", "es", "dataset:large_spanish_corpus", "transformers", "spanish", "OpenCENIA", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "DistilBertForMaskedLM" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
670
null
--- license: creativeml-openrail-m base_model: darkstorm2150/Protogen_x5.8_Official_Release tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers - lora inference: true --- # LoRA DreamBooth - ScottHan/model These are LoRA adaptation weights for darkstorm2150/Protogen_x5.8_Official_Release. The weights were trained on guoquan using [DreamBooth](https://dreambooth.github.io/). You can find some example images below. ![img_0](./image_0.png) ![img_1](./image_1.png) ![img_2](./image_2.png) ![img_3](./image_3.png)
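A rough diffusers sketch for trying these weights, assuming the repo stores them in the standard diffusers LoRA format and a diffusers version that provides `load_lora_weights`; the prompt token `guoquan` comes from the card above:

```python
import torch
from diffusers import StableDiffusionPipeline

# Base checkpoint named in the card; ScottHan/model holds the LoRA weights.
pipe = StableDiffusionPipeline.from_pretrained(
    "darkstorm2150/Protogen_x5.8_Official_Release", torch_dtype=torch.float16
).to("cuda")
pipe.load_lora_weights("ScottHan/model")  # assumes pytorch_lora_weights at the repo root

image = pipe("a photo of guoquan", num_inference_steps=30, guidance_scale=7.5).images[0]
image.save("guoquan.png")
```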
CennetOguz/distilbert-base-uncased-finetuned-recipe
[ "pytorch", "tensorboard", "distilbert", "fill-mask", "transformers", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "DistilBertForMaskedLM" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
2023-01-28T10:59:27Z
--- language: - zh tags: - AIvtuber - VirtuaReal --- # SUImodels ### 岁己所有的模型都在这里 ### 包括sovits3.0、4.0及onnx,~~还有以后会出的vits模型~~ VITS模型有需要的联系我,主要是走一下免责协议什么的有的没的过程( ## Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** [謬紗特](https://space.bilibili.com/59442895) - **Model type:** [so-vits-svc 3.0 48kHz](https://github.com/svc-develop-team/so-vits-svc/tree/3.0-48k)、[so-vits-svc 4.0](https://github.com/svc-develop-team/so-vits-svc) - **Demo:** [SPACE: AI岁己(歌声变声器)](https://huggingface.co/spaces/Miuzarte/SUI-svc-3.0)、[SPACE: AI岁己(歌声变声器)第二代](https://huggingface.co/spaces/Miuzarte/SUI-svc-4.0) ### pth文件名的训练步数是程序按学习率等超参数得出的步数,onnx文件名的步数为实际训练步数 |sovits3_v1|Base/G_1000000.pth|Singing/G_1M111000.pth|Singing/G_100000.pth| |-:|:-:|:-:|:-:| |onnx|Base/suijiSUI_v1_1M_SoVits.onnx|Singing/suijiSUI_v1_1M111000_SoVits.onnx|Singing/suijiSUI_v1_100000_SoVits.onnx| |训练集|12月录播(除电台)、出道至今22条歌投、10条歌切、圣诞音声(27.5小时)|Base/G_1000000.pth作为底模_2022年所有唱歌投稿、唱歌切片、圣诞音声(3.9小时)|2022年所有唱歌投稿、唱歌切片、圣诞音声(3.9小时)| ### 因为v2练着练着突然sovits4.0就出来了所以200k直接收了,弃用 |sovits3_v2|Base/G_100000.pth|Singing/G_160000.pth| |-:|:-:|:-:| |onnx|Base/suijiSUI_v2_100000_SoVits.onnx|Singing/suijiSUI_v2_100k100000_SoVits.onnx| |训练集|22年12月、23年1月的录播(06:47:46)|Base/G_100000.pth作为底模_22年12月、23年1月、23年2月1-17日的录播(除电台,共计268:07:43)、岁己的投稿、A1in_sy11月及以前的歌切| ### 160k开始loss就没再往下了,后两个估计有一丁点过拟合,然后我个人也听不出这三个模型有什么区别,有强迫症的可以自己再仔细对比一下,我个人倾向于折中使用Singing/G_210000.pth |sovits4_v3|Base/G_100000.pth|Singing/G_160000.pth| |-:|:-:|:-:| |onnx|Base/suijiSUI_v3_100000_SoVits.onnx|Singing/suijiSUI_v3_100k100000_SoVits.onnx| |训练集|22年12月、23年1月的录播(06:47:46)|Base/G_100000.pth作为底模_22年12月、23年1月、23年2月1-17日的录播(除电台,共计268:07:43)、岁己的投稿、A1in_sy11月及以前的歌切| |sovits4_v3|Singing/G_210000.pth|Singing/G_260000.pth|Singing/kmeans_10000.pt| |-:|:-:|:-:|:-:| |onnx|Singing/suijiSUI_v3_100k150000_SoVits.onnx|Singing/suijiSUI_v3_100k200000_SoVits.onnx|聚类模型,暂无onnx| |训练集|{同Singing/G_160000.pth}|{同Singing/G_160000.pth}|{同Singing/G_160000.pth}| ### ~~sovits4.0-v2实在是没算力能用了,勉强跑个200k就算了~~ ### sovits4.0-v2跟4.0跑了一样的步数,能对比一下两个版本之间的差别 (我是听不出区别,建议用4.0,仓库更新了不少新功能) |sovits4-v2_v4|Base/G_100000.pth|Singing/G_160000.pth| |-:|:-:|:-:| |onnx|Base/suijiSUI_v4_100000_SoVits.onnx|Singing/suijiSUI_v4_100k100000_SoVits.onnx| |训练集|22年12月、23年1月的录播(06:47:46)|Base/G_100000.pth作为底模_22年12月、23年1月、23年2月1-17日的录播(除电台,共计268:07:43)、岁己的投稿、A1in_sy11月及以前的歌切| |sovits4-v2_v4|Singing/G_210000.pth|Singing/G_260000.pth|Singing/kmeans_10000.pt| |-:|:-:|:-:|:-:| |onnx|Singing/suijiSUI_v4_100k150000_SoVits.onnx|Singing/suijiSUI_v4_100k200000_SoVits.onnx|聚类模型,暂无onnx| |训练集|{同Singing/G_160000.pth}|{同Singing/G_160000.pth}|{同Singing/G_160000.pth}| ### v2、v3(v4的学习率使用默认的0.0002)的dataset、filelist、config完全一致,可用作sovits3.0与4.0的对比 ### 数据集: [Miuzarte/SUISovitsDataForBaseModel](https://huggingface.co/datasets/Miuzarte/SUISovitsDataForBaseModel)、[Miuzarte/SUISovitsDataForSingingModel](https://huggingface.co/datasets/Miuzarte/SUISovitsDataForSingingModel) ## MoeSS\\Mods配置文件 #### sovits3.0需要MoeSS\\hubert\\[hubert.onnx](https://huggingface.co/NaruseMioShirakana/MoeSS-SUBModel/blob/main/hubert.7z) 岁己SUI_v1_1M.json (suijiSUI_v1_1M\\) ```json { "Folder" : "suijiSUI_v1_1M", "Name" : "岁己SUI_v1_1M", "Type" : "SoVits", "Rate" : 48000, "Hop" : 320, "Hubert": "hubert", "SoVits3": true, "Characters" : ["岁己SUI"] } ``` 岁己SUI_v1_1M111k.json (suijiSUI_v1_1M111000\\) ```json { "Folder" : "suijiSUI_v1_1M111000", "Name" : "岁己SUI_v1_1M111k", "Type" : "SoVits", "Rate" : 48000, "Hop" : 320, "Hubert": "hubert", "SoVits3": true, "Characters" : ["岁己SUI"] } ``` 
岁己SUI_v1_100k.json (suijiSUI_v1_100000\\) ```json { "Folder" : "suijiSUI_v1_100000", "Name" : "岁己SUI_v1_100k", "Type" : "SoVits", "Rate" : 48000, "Hop" : 320, "Hubert": "hubert", "SoVits3": true, "Characters" : ["岁己SUI"] } ``` 岁己SUI_v2_100k.json (suijiSUI_v2_100000\\) ```json { "Folder" : "suijiSUI_v2_100000", "Name" : "岁己SUI_v2_100k", "Type" : "SoVits", "Rate" : 48000, "Hop" : 320, "Hubert": "hubert", "SoVits3": true, "Characters" : ["岁己SUI"] } ``` 岁己SUI_v2_100k100k.json (suijiSUI_v2_100k100000\\) ```json { "Folder" : "suijiSUI_v2_100k100000", "Name" : "岁己SUI_v2_100k100k", "Type" : "SoVits", "Rate" : 48000, "Hop" : 320, "Hubert": "hubert", "SoVits3": true, "Characters" : ["岁己SUI"] } ``` #### sovits4.0需要MoeSS\\hubert\\[hubert4.0.onnx](https://huggingface.co/NaruseMioShirakana/MoeSS-SUBModel/blob/main/hubert4.0.7z) sovits4.0被支持于MoeSS v4.2.0,建议使用最新版[[MoeSS/releases]](https://github.com/NaruseMioShirakana/MoeSS/releases) #### 更建议使用[sovits4.0](https://github.com/innnky/so-vits-svc/tree/4.0)/[sovits4.0-v2](https://github.com/svc-develop-team/so-vits-svc/tree/4.0-v2)的inference_main.py进行推理 岁己SUI_v3_100k.json (suijiSUI_v3_100000\\) ```json { "Folder" : "suijiSUI_v3_100000", "Name" : "岁己SUI_v3_100k", "Type" : "SoVits", "Rate" : 44100, "Hop" : 512, "Hubert": "hubert4.0", "SoVits4": true, "Characters" : ["岁己SUI"] } ``` 岁己SUI_v3_100k100k.json (suijiSUI_v3_100k100000\\) ```json { "Folder" : "suijiSUI_v3_100k100000", "Name" : "岁己SUI_v3_100k100k", "Type" : "SoVits", "Rate" : 44100, "Hop" : 512, "Hubert": "hubert4.0", "SoVits4": true, "Characters" : ["岁己SUI"] } ``` 岁己SUI_v3_100k150k.json (suijiSUI_v3_100k150000\\) ```json { "Folder" : "suijiSUI_v3_100k150000", "Name" : "岁己SUI_v3_100k150k", "Type" : "SoVits", "Rate" : 44100, "Hop" : 512, "Hubert": "hubert4.0", "SoVits4": true, "Characters" : ["岁己SUI"] } ``` 岁己SUI_v3_100k200k.json (suijiSUI_v3_100k200000\\) ```json { "Folder" : "suijiSUI_v3_100k200000", "Name" : "岁己SUI_v3_100k200k", "Type" : "SoVits", "Rate" : 44100, "Hop" : 512, "Hubert": "hubert4.0", "SoVits4": true, "Characters" : ["岁己SUI"] } ``` 岁己SUI_v4_100k.json (suijiSUI_v4_100000\\) ```json { "Folder" : "suijiSUI_v4_100000", "Name" : "岁己SUI_v4_100k", "Type" : "SoVits", "Rate" : 44100, "Hop" : 512, "Hubert": "hubert4.0", "SoVits4": true, "Characters" : ["岁己SUI"] } ``` 岁己SUI_v4_100k100k.json (suijiSUI_v4_100k100000\\) ```json { "Folder" : "suijiSUI_v4_100k100000", "Name" : "岁己SUI_v4_100k100k", "Type" : "SoVits", "Rate" : 44100, "Hop" : 512, "Hubert": "hubert4.0", "SoVits4": true, "Characters" : ["岁己SUI"] } ``` 岁己SUI_v4_100k150k.json (suijiSUI_v4_100k150000\\) ```json { "Folder" : "suijiSUI_v4_100k150000", "Name" : "岁己SUI_v4_100k150k", "Type" : "SoVits", "Rate" : 44100, "Hop" : 512, "Hubert": "hubert4.0", "SoVits4": true, "Characters" : ["岁己SUI"] } ``` 岁己SUI_v4_100k200k.json (suijiSUI_v4_100k200000\\) ```json { "Folder" : "suijiSUI_v4_100k200000", "Name" : "岁己SUI_v4_100k200k", "Type" : "SoVits", "Rate" : 44100, "Hop" : 512, "Hubert": "hubert4.0", "SoVits4": true, "Characters" : ["岁己SUI"] } ```
Chaewon/mnmt_decoder_en_gpt2
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-01-28T11:29:27Z
--- license: cc-by-nc-sa-4.0 language: - en library_name: diffusers pipeline_tag: text-to-image tags: - stable-diffusion - stable-diffusion-diffusers --- # 本人郑重声明:本模型禁止用于训练基于明星、公众人物肖像的风格模型训练,因为这会带来争议,对AI社区的发展造成不良的负面影响。 # 本模型注明:训练素材中不包含任何真人素材。 | 版本 | 效果图 | | --- | --- | | **GuoFeng3.3** | ![min_00193-3556647833.png.jpg](https://ai-studio-static-online.cdn.bcebos.com/fd09b7f02da24d3391bea0c639a14a80c12aec9467484d67a7ab5a32cef84bb1) | | **GuoFeng3.2_light** | ![178650.png](https://ai-studio-static-online.cdn.bcebos.com/9d5e36ad89f947a39b631f70409366c3bd531aa3a1214be7b0cf115daa62fb94) | | **GuoFeng3.2** | ![00044-4083026190-1girl, beautiful, realistic.png.png](https://ai-studio-static-online.cdn.bcebos.com/ff5c7757f97849ecb5320bfbe7b692d1cb12da547c9348058a842ea951369ff8) | | **GuoFeng3** | ![e1.png](https://ai-studio-static-online.cdn.bcebos.com/be966cf5c86d431cb33d33396560f546fdd4c15789d54203a8bd15c35abd7dc2) | # 介绍 - GuoFeng3 欢迎使用GuoFeng3模型 - (TIP:这个版本的名字进行了微调),这是一个中国华丽古风风格模型,也可以说是一个古风游戏角色模型,具有2.5D的质感。第三代大幅度减少上手难度,增加了场景元素与男性古风人物,除此之外为了模型能更好的适应其它TAG,还增加了其它风格的元素。这一代对脸和手的崩坏有一定的修复,同时素材大小也提高到了最长边1024。 根据个人的实验与收到的反馈,国风模型系列的第二代,在人物,与大头照的效果表现比三代更好,如果你有这方面需求不妨试试第二代。 2.0版本:[https://huggingface.co/xiaolxl/Gf_style2](https://huggingface.co/xiaolxl/Gf_style2) GuoFeng3:原始模型 GuoFeng3.1:对GuoFeng3人像进行了微调修复 GuoFeng3.2:如果你不知道选择GuoFeng3还是GuoFeng2,可以直接使用此版本 GuoFeng3.2_light:通过GuoFeng3.2融合了基于 Noise Offset 训练的Lora使得模型能够画出更漂亮的光影效果(Lora:epi_noiseoffset/Theovercomer8's Contrast Fix) GuoFeng3.2_Lora:国风3.2 Lora版本 GuoFeng3.2_Lora_big_light:国风3.2_light Lora版本 维度增大版本 GuoFeng3.2_f16:国风3.2 半精版本 GuoFeng3.2_light_f16:国风3.2_light 半精版本 GuoFeng3.3:此版本是基于3.2的一次较大的更新与改进,可以适配full body,即使你的tag不太好,模型也会对画面进行自动修改,不过因此模型出的脸会比较雷同。此模型似乎不需要超分,我的出图大小是768*1024,清晰度还不错。建议竖图,横图可能不清晰。Euler a即可。(DPM++ SDE Karras, DDIM也不错) -- Welcome to the GuoFeng3 model - (TIP: the name of this version has been fine-tuned). This is a Chinese gorgeous antique style model, which can also be said to be an antique game character model with a 2.5D texture. The third generation greatly reduces the difficulty of getting started, and adds scene elements and male antique characters. In addition, in order to better adapt the model to other TAGs, other style elements are also added. This generation has repaired the broken face and hands to a certain extent, and the size of the material has also increased to the longest side of 1024. According to personal experiments and feedback received, the second generation of the Guofeng model series performs better than the third generation in terms of characters and big head photos. If you have this need, you can try the second generation. Version 2.0:[https://huggingface.co/xiaolxl/Gf_style2](https://huggingface.co/xiaolxl/Gf_style2) GuoFeng3: original model GuoFeng3.1: The portrait of GuoFeng3 has been fine-tuned and repaired GuoFeng3.2: If you don't know whether to choose GuoFeng3 or GuoFeng2, you can use this version directly GuoFeng3.2_Light: Through GuoFeng3.2, Lora based on Noise Offset training is integrated to enable the model to draw more beautiful light and shadow effects (Lora: epi_noiseoffset/Theovercolor8's Contrast Fix) GuoFeng3.2_Lora: Guofeng3.2 Lora version GuoFeng3.2_Lora_big_Light: Guofeng3.2_Light Lora Version Dimension Increase Version GuoFeng3.2_F16: Guofeng3.2 semi-refined version GuoFeng3.2_light_f16: Guofeng3.2_Light semi-refined version GuoFeng3.3: This version is a major update and improvement based on 3.2, which can adapt to full bodies. 
Even if your tag is not good, the model will automatically modify the screen, but the faces produced by the model will be quite similar. This model doesn't seem to require supersession. My plot size is 768 * 1024, and the clarity is quite good. Suggest vertical view, horizontal view may not be clear. Euler a is sufficient. (DPM++SDE Karras, DDIM is also good) # 安装教程 - install 1. 将GuoFeng3.ckpt模型放入SD目录 - Put GuoFeng3.ckpt model into SD directory 2. 此模型自带VAE,如果你的程序不支持,请记得选择任意一个VAE文件,否则图形将为灰色 - This model comes with VAE. If your program does not support it, please remember to select any VAE file, otherwise the graphics will be gray # 如何使用 - How to use **TIP:经过一天的测试,发现很多人物可能出现红眼问题,可以尝试在负面词添加red eyes。如果色彩艳丽可以尝试降低CFG - After a day of testing, we found that many characters may have red-eye problems. We can try to add red eyes to negative words。Try to reduce CFG if the color is bright** 简单:第三代大幅度减少上手难度 - Simple: the third generation greatly reduces the difficulty of getting started - **关键词 - key word:** ``` best quality, masterpiece, highres, 1girl,china dress,Beautiful face ``` - **负面词 - Negative words:** ``` NSFW, lowres,bad anatomy,bad hands, text, error, missing fingers,extra digit, fewer digits, cropped, worstquality, low quality, normal quality,jpegartifacts,signature, watermark, username,blurry,bad feet ``` --- 高级:如果您还想使图片尽可能更好,请尝试以下配置 - senior:If you also want to make the picture as better as possible, please try the following configuration - Sampling steps:**50** - Sampler:**DPM++ SDE Karras or DDIM** - The size of the picture should be at least **1024** - 图片大小至少1024 - CFG:**4-6** - **更好的负面词 Better negative words - 感谢群友提供的负面词:** ``` (((simple background))),monochrome ,lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, lowres, bad anatomy, bad hands, text, error, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, ugly,pregnant,vore,duplicate,morbid,mut ilated,tran nsexual, hermaphrodite,long neck,mutated hands,poorly drawn hands,poorly drawn face,mutation,deformed,blurry,bad anatomy,bad proportions,malformed limbs,extra limbs,cloned face,disfigured,gross proportions, (((missing arms))),(((missing legs))), (((extra arms))),(((extra legs))),pubic hair, plump,bad legs,error legs,username,blurry,bad feet ``` - **如果想元素更丰富,可以添加下方关键词 - If you want to enrich the elements, you can add the following keywords** ``` Beautiful face, hair ornament, solo,looking at viewer,smile,closed mouth,lips china dress,dress,hair ornament, necklace, jewelry, long hair, earrings, chinese clothes, architecture,east asian architecture,building,outdoors,rooftop,city,cityscape ``` # 例图 - Examples (可在文件列表中找到原图,并放入WebUi查看关键词等信息) - (You can find the original image in the file list, and put WebUi to view keywords and other information) <img src=https://huggingface.co/xiaolxl/GuoFeng3/resolve/main/examples/e1.png> <img src=https://huggingface.co/xiaolxl/GuoFeng3/resolve/main/examples/e2.png> <img src=https://huggingface.co/xiaolxl/GuoFeng3/resolve/main/examples/e3.png> <img src=https://huggingface.co/xiaolxl/GuoFeng3/resolve/main/examples/e4.png>
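For those who prefer diffusers over the WebUI, a rough loading sketch; it assumes the `GuoFeng3.ckpt` file named in the install section sits at the repo root and that the installed diffusers version provides `from_single_file` (older versions need the ckpt-conversion script instead):

```python
import torch
from diffusers import StableDiffusionPipeline
from huggingface_hub import hf_hub_download

# Assumed filename/location -- adjust if the repo layout differs.
ckpt = hf_hub_download(repo_id="xiaolxl/GuoFeng3", filename="GuoFeng3.ckpt")
pipe = StableDiffusionPipeline.from_single_file(ckpt, torch_dtype=torch.float16).to("cuda")

prompt = "best quality, masterpiece, highres, 1girl, china dress, Beautiful face"
negative = "NSFW, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality"
image = pipe(prompt, negative_prompt=negative, num_inference_steps=50, guidance_scale=5).images[0]
image.save("guofeng3.png")
```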
chainyo/speaker-recognition-meetup
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python model = load_from_hub(repo_id="Mykolyt/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
ChaseBread/DialoGPT-small-harrypotter
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- license: mit tags: - generated_from_keras_callback model-index: - name: piyusharma/gpt2-finetuned-lex results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # piyusharma/gpt2-finetuned-lex This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 3.2071 - Epoch: 2 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': 2e-05, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: float32 ### Training results | Train Loss | Epoch | |:----------:|:-----:| | 3.3489 | 0 | | 3.2568 | 1 | | 3.2071 | 2 | ### Framework versions - Transformers 4.26.0 - TensorFlow 2.9.2 - Datasets 2.9.0 - Tokenizers 0.13.2
CleveGreen/FieldClassifier_v2
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
46
null
--- license: apache-2.0 datasets: - xnli - mlqa - paws-x language: - fr - es - en - de - sw - ru - zh - el - bg - ar - vi - th - hi - ur --- ### Disclaimer :- I don't own the weights of `ernie-m-large` neither did I train the model. I only converted the model weights from paddle to pytorch(using the scripts listed in files). The real(paddle) weights can be found [here](https://huggingface.co/PaddlePaddle/ernie-m-large). The rest of the README is copied from the same page listed above, [![paddlenlp-banner](https://user-images.githubusercontent.com/1371212/175816733-8ec25eb0-9af3-4380-9218-27c154518258.png)](https://github.com/PaddlePaddle/PaddleNLP) # PaddlePaddle/ernie-m-base ## Ernie-M ERNIE-M, proposed by Baidu, is a new training method that encourages the model to align the representation of multiple languages with monolingual corpora, to overcome the constraint that the parallel corpus size places on the model performance. The insight is to integrate back-translation into the pre-training process by generating pseudo-parallel sentence pairs on a monolingual corpus to enable the learning of semantic alignments between different languages, thereby enhancing the semantic modeling of cross-lingual models. Experimental results show that ERNIE-M outperforms existing cross-lingual models and delivers new state-of-the-art results in various cross-lingual downstream tasks. We proposed two novel methods to align the representation of multiple languages: Cross-Attention Masked Language Modeling(CAMLM): In CAMLM, we learn the multilingual semantic representation by restoring the MASK tokens in the input sentences. Back-Translation masked language modeling(BTMLM): We use BTMLM to train our model to generate pseudo-parallel sentences from the monolingual sentences. The generated pairs are then used as the input of the model to further align the cross-lingual semantics, thus enhancing the multilingual representation. ![ernie-m](ernie_m.png) ## Benchmark ### XNLI XNLI is a subset of MNLI and has been translated into 14 different kinds of languages including some low-resource languages. The goal of the task is to predict testual entailment (whether sentence A implies / contradicts / neither sentence B). 
| Model | en | fr | es | de | el | bg | ru | tr | ar | vi | th | zh | hi | sw | ur | Avg | | ---------------------- | -------- | -------- | -------- | -------- | -------- | -------- | -------- | -------- | -------- | -------- | -------- | -------- | -------- | -------- | -------- | -------- | | Cross-lingual Transfer | | | | | | | | | | | | | | | | | | XLM | 85.0 | 78.7 | 78.9 | 77.8 | 76.6 | 77.4 | 75.3 | 72.5 | 73.1 | 76.1 | 73.2 | 76.5 | 69.6 | 68.4 | 67.3 | 75.1 | | Unicoder | 85.1 | 79.0 | 79.4 | 77.8 | 77.2 | 77.2 | 76.3 | 72.8 | 73.5 | 76.4 | 73.6 | 76.2 | 69.4 | 69.7 | 66.7 | 75.4 | | XLM-R | 85.8 | 79.7 | 80.7 | 78.7 | 77.5 | 79.6 | 78.1 | 74.2 | 73.8 | 76.5 | 74.6 | 76.7 | 72.4 | 66.5 | 68.3 | 76.2 | | INFOXLM | **86.4** | **80.6** | 80.8 | 78.9 | 77.8 | 78.9 | 77.6 | 75.6 | 74.0 | 77.0 | 73.7 | 76.7 | 72.0 | 66.4 | 67.1 | 76.2 | | **ERNIE-M** | 85.5 | 80.1 | **81.2** | **79.2** | **79.1** | **80.4** | **78.1** | **76.8** | **76.3** | **78.3** | **75.8** | **77.4** | **72.9** | **69.5** | **68.8** | **77.3** | | XLM-R Large | 89.1 | 84.1 | 85.1 | 83.9 | 82.9 | 84.0 | 81.2 | 79.6 | 79.8 | 80.8 | 78.1 | 80.2 | 76.9 | 73.9 | 73.8 | 80.9 | | INFOXLM Large | **89.7** | 84.5 | 85.5 | 84.1 | 83.4 | 84.2 | 81.3 | 80.9 | 80.4 | 80.8 | 78.9 | 80.9 | 77.9 | 74.8 | 73.7 | 81.4 | | VECO Large | 88.2 | 79.2 | 83.1 | 82.9 | 81.2 | 84.2 | 82.8 | 76.2 | 80.3 | 74.3 | 77.0 | 78.4 | 71.3 | **80.4** | **79.1** | 79.9 | | **ERNIR-M Large** | 89.3 | **85.1** | **85.7** | **84.4** | **83.7** | **84.5** | 82.0 | **81.2** | **81.2** | **81.9** | **79.2** | **81.0** | **78.6** | 76.2 | 75.4 | **82.0** | | Translate-Train-All | | | | | | | | | | | | | | | | | | XLM | 85.0 | 80.8 | 81.3 | 80.3 | 79.1 | 80.9 | 78.3 | 75.6 | 77.6 | 78.5 | 76.0 | 79.5 | 72.9 | 72.8 | 68.5 | 77.8 | | Unicoder | 85.6 | 81.1 | 82.3 | 80.9 | 79.5 | 81.4 | 79.7 | 76.8 | 78.2 | 77.9 | 77.1 | 80.5 | 73.4 | 73.8 | 69.6 | 78.5 | | XLM-R | 85.4 | 81.4 | 82.2 | 80.3 | 80.4 | 81.3 | 79.7 | 78.6 | 77.3 | 79.7 | 77.9 | 80.2 | 76.1 | 73.1 | 73.0 | 79.1 | | INFOXLM | 86.1 | 82.0 | 82.8 | 81.8 | 80.9 | 82.0 | 80.2 | 79.0 | 78.8 | 80.5 | 78.3 | 80.5 | 77.4 | 73.0 | 71.6 | 79.7 | | **ERNIE-M** | **86.2** | **82.5** | **83.8** | **82.6** | **82.4** | **83.4** | **80.2** | **80.6** | **80.5** | **81.1** | **79.2** | **80.5** | **77.7** | **75.0** | **73.3** | **80.6** | | XLM-R Large | 89.1 | 85.1 | 86.6 | 85.7 | 85.3 | 85.9 | 83.5 | 83.2 | 83.1 | 83.7 | 81.5 | **83.7** | **81.6** | 78.0 | 78.1 | 83.6 | | VECO Large | 88.9 | 82.4 | 86.0 | 84.7 | 85.3 | 86.2 | **85.8** | 80.1 | 83.0 | 77.2 | 80.9 | 82.8 | 75.3 | **83.1** | **83.0** | 83.0 | | **ERNIE-M Large** | **89.5** | **86.5** | **86.9** | **86.1** | **86.0** | **86.8** | 84.1 | **83.8** | **84.1** | **84.5** | **82.1** | 83.5 | 81.1 | 79.4 | 77.9 | **84.2** | ### Cross-lingual Named Entity Recognition * datasets:CoNLI | Model | en | nl | es | de | Avg | | ------------------------------ | --------- | --------- | --------- | --------- | --------- | | *Fine-tune on English dataset* | | | | | | | mBERT | 91.97 | 77.57 | 74.96 | 69.56 | 78.52 | | XLM-R | 92.25 | **78.08** | 76.53 | **69.60** | 79.11 | | **ERNIE-M** | **92.78** | 78.01 | **79.37** | 68.08 | **79.56** | | XLM-R LARGE | 92.92 | 80.80 | 78.64 | 71.40 | 80.94 | | **ERNIE-M LARGE** | **93.28** | **81.45** | **78.83** | **72.99** | **81.64** | | *Fine-tune on all dataset* | | | | | | | XLM-R | 91.08 | 89.09 | 87.28 | 83.17 | 87.66 | | **ERNIE-M** | **93.04** | **91.73** | **88.33** | **84.20** | **89.32** | | XLM-R LARGE | 92.00 | 91.60 
| **89.52** | 84.60 | 89.43 | | **ERNIE-M LARGE** | **94.01** | **93.81** | 89.23 | **86.20** | **90.81** | ### Cross-lingual Question Answering * datasets:MLQA | Model | en | es | de | ar | hi | vi | zh | Avg | | ----------------- | --------------- | --------------- | --------------- | --------------- | --------------- | --------------- | --------------- | --------------- | | mBERT | 77.7 / 65.2 | 64.3 / 46.6 | 57.9 / 44.3 | 45.7 / 29.8 | 43.8 / 29.7 | 57.1 / 38.6 | 57.5 / 37.3 | 57.7 / 41.6 | | XLM | 74.9 / 62.4 | 68.0 / 49.8 | 62.2 / 47.6 | 54.8 / 36.3 | 48.8 / 27.3 | 61.4 / 41.8 | 61.1 / 39.6 | 61.6 / 43.5 | | XLM-R | 77.1 / 64.6 | 67.4 / 49.6 | 60.9 / 46.7 | 54.9 / 36.6 | 59.4 / 42.9 | 64.5 / 44.7 | 61.8 / 39.3 | 63.7 / 46.3 | | INFOXLM | 81.3 / 68.2 | 69.9 / 51.9 | 64.2 / 49.6 | 60.1 / 40.9 | 65.0 / 47.5 | 70.0 / 48.6 | 64.7 / **41.2** | 67.9 / 49.7 | | **ERNIE-M** | **81.6 / 68.5** | **70.9 / 52.6** | **65.8 / 50.7** | **61.8 / 41.9** | **65.4 / 47.5** | **70.0 / 49.2** | **65.6** / 41.0 | **68.7 / 50.2** | | XLM-R LARGE | 80.6 / 67.8 | 74.1 / 56.0 | 68.5 / 53.6 | 63.1 / 43.5 | 62.9 / 51.6 | 71.3 / 50.9 | 68.0 / 45.4 | 70.7 / 52.7 | | INFOXLM LARGE | **84.5 / 71.6** | **75.1 / 57.3** | **71.2 / 56.2** | **67.6 / 47.6** | 72.5 / 54.2 | **75.2 / 54.1** | 69.2 / 45.4 | 73.6 / 55.2 | | **ERNIE-M LARGE** | 84.4 / 71.5 | 74.8 / 56.6 | 70.8 / 55.9 | 67.4 / 47.2 | **72.6 / 54.7** | 75.0 / 53.7 | **71.1 / 47.5** | **73.7 / 55.3** | ### Cross-lingual Paraphrase Identification * datasets:PAWS-X | Model | en | de | es | fr | ja | ko | zh | Avg | | ---------------------- | -------- | -------- | -------- | -------- | -------- | -------- | -------- | -------- | | Cross-lingual Transfer | | | | | | | | | | mBERT | 94.0 | 85.7 | 87.4 | 87.0 | 73.0 | 69.6 | 77.0 | 81.9 | | XLM | 94.0 | 85.9 | 88.3 | 87.4 | 69.3 | 64.8 | 76.5 | 80.9 | | MMTE | 93.1 | 85.1 | 87.2 | 86.9 | 72.0 | 69.2 | 75.9 | 81.3 | | XLM-R LARGE | 94.7 | 89.7 | 90.1 | 90.4 | 78.7 | 79.0 | 82.3 | 86.4 | | VECO LARGE | **96.2** | 91.3 | 91.4 | 92.0 | 81.8 | 82.9 | 85.1 | 88.7 | | **ERNIE-M LARGE** | 96.0 | **91.9** | **91.4** | **92.2** | **83.9** | **84.5** | **86.9** | **89.5** | | Translate-Train-All | | | | | | | | | | VECO LARGE | 96.4 | 93.0 | 93.0 | 93.5 | 87.2 | 86.8 | 87.9 | 91.1 | | **ERNIE-M LARGE** | **96.5** | **93.5** | **93.3** | **93.8** | **87.9** | **88.4** | **89.2** | **91.8** | ### Cross-lingual Sentence Retrieval * dataset:Tatoeba | Model | Avg | | --------------------------------------- | -------- | | XLM-R LARGE | 75.2 | | VECO LARGE | 86.9 | | **ERNIE-M LARGE** | **87.9** | | **ERNIE-M LARGE( after fine-tuning)** | **93.3** | ## Citation Info ```text @article{Ouyang2021ERNIEMEM, title={ERNIE-M: Enhanced Multilingual Representation by Aligning Cross-lingual Semantics with Monolingual Corpora}, author={Xuan Ouyang and Shuohuan Wang and Chao Pang and Yu Sun and Hao Tian and Hua Wu and Haifeng Wang}, journal={ArXiv}, year={2021}, volume={abs/2012.15674} } ```
CodeNinja1126/xlm-roberta-large-kor-mrc
[ "pytorch", "xlm-roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "XLMRobertaForQuestionAnswering" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- license: cc-by-nc-sa-4.0 tags: - generated_from_trainer model-index: - name: gpt_16_4_3e-5_lp5_nb5 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # gpt_16_4_3e-5_lp5_nb5 This model is a fine-tuned version of [skt/kogpt2-base-v2](https://huggingface.co/skt/kogpt2-base-v2) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 3.8872 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 4.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 3.311 | 0.38 | 1000 | 4.0339 | | 3.1133 | 0.76 | 2000 | 3.9777 | | 2.9875 | 1.13 | 3000 | 3.9546 | | 2.8697 | 1.51 | 4000 | 3.9269 | | 2.8669 | 1.89 | 5000 | 3.9159 | | 2.7308 | 2.27 | 6000 | 3.9066 | | 2.709 | 2.64 | 7000 | 3.8995 | | 2.6979 | 3.02 | 8000 | 3.8976 | | 2.5878 | 3.4 | 9000 | 3.8978 | | 2.5824 | 3.78 | 10000 | 3.8872 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.9.0+cu102 - Datasets 2.8.0 - Tokenizers 0.13.2
CoffeeAddict93/gpt1-modest-proposal
[ "pytorch", "openai-gpt", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "OpenAIGPTLMHeadModel" ], "model_type": "openai-gpt", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
null
--- license: cc-by-4.0 tags: - generated_from_trainer model-index: - name: minilm-uncased-squad2-finetuned-squad-12-trainedfor-3 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # minilm-uncased-squad2-finetuned-squad-12-trainedfor-3 This model is a fine-tuned version of [deepset/minilm-uncased-squad2](https://huggingface.co/deepset/minilm-uncased-squad2) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.6181 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-08 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 0.6694 | 1.0 | 578 | 0.6175 | | 0.681 | 2.0 | 1156 | 0.6180 | | 0.6829 | 3.0 | 1734 | 0.6181 | ### Framework versions - Transformers 4.26.0 - Pytorch 1.13.1+cu116 - Datasets 2.9.0 - Tokenizers 0.13.2
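The card lists only the model name, so the snippet below uses a placeholder namespace (`<user>` is not the real owner); it is just the standard extractive-QA pipeline pattern:

```python
from transformers import pipeline

# "<user>" is a placeholder namespace for this fine-tuned checkpoint.
qa = pipeline(
    "question-answering",
    model="<user>/minilm-uncased-squad2-finetuned-squad-12-trainedfor-3",
)
result = qa(
    question="What does extractive question answering return?",
    context="Extractive question answering returns a span copied directly from the given context.",
)
print(result["answer"], round(result["score"], 3))
```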
Connor-tech/bert_cn_finetuning
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
--- license: apache-2.0 tags: - summarization - generated_from_trainer metrics: - rouge model-index: - name: mt5-small-finetuned-28jan-2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mt5-small-finetuned-28jan-2 This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.5078 - Rouge1: 18.7485 - Rouge2: 5.8034 - Rougel: 18.5163 - Rougelsum: 18.4817 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 9 - eval_batch_size: 9 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 15 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | |:-------------:|:-----:|:----:|:---------------:|:-------:|:------:|:-------:|:---------:| | 5.9652 | 1.0 | 242 | 2.8048 | 14.036 | 4.037 | 13.7766 | 13.8254 | | 3.474 | 2.0 | 484 | 2.7485 | 16.821 | 4.8051 | 16.6168 | 16.5782 | | 3.2101 | 3.0 | 726 | 2.6444 | 17.2659 | 5.1077 | 16.9501 | 16.8998 | | 3.0555 | 4.0 | 968 | 2.6408 | 17.3002 | 4.8657 | 17.0414 | 16.9794 | | 2.9515 | 5.0 | 1210 | 2.5860 | 17.6468 | 5.3816 | 17.3755 | 17.3434 | | 2.8694 | 6.0 | 1452 | 2.5586 | 18.3932 | 5.3896 | 18.2521 | 18.0748 | | 2.7898 | 7.0 | 1694 | 2.5325 | 18.4954 | 5.5609 | 18.2994 | 18.2112 | | 2.7436 | 8.0 | 1936 | 2.5431 | 18.8172 | 5.9338 | 18.4693 | 18.4324 | | 2.6955 | 9.0 | 2178 | 2.5588 | 18.7895 | 6.1003 | 18.3593 | 18.3268 | | 2.6571 | 10.0 | 2420 | 2.5079 | 19.2525 | 5.8268 | 19.0279 | 18.9846 | | 2.629 | 11.0 | 2662 | 2.5118 | 18.9191 | 5.9877 | 18.6505 | 18.6 | | 2.5998 | 12.0 | 2904 | 2.5070 | 18.7181 | 5.9061 | 18.4432 | 18.3931 | | 2.5692 | 13.0 | 3146 | 2.5014 | 18.4412 | 6.1983 | 18.2394 | 18.1618 | | 2.5751 | 14.0 | 3388 | 2.5125 | 18.7014 | 5.9729 | 18.4366 | 18.406 | | 2.55 | 15.0 | 3630 | 2.5078 | 18.7485 | 5.8034 | 18.5163 | 18.4817 | ### Framework versions - Transformers 4.26.0 - Pytorch 1.13.1+cu116 - Datasets 2.9.0 - Tokenizers 0.13.2
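As with the other auto-generated cards, only the model name is given; a summarization-pipeline sketch with a placeholder namespace:

```python
from transformers import pipeline

# "<user>" is a placeholder namespace for this mT5 checkpoint.
summarizer = pipeline("summarization", model="<user>/mt5-small-finetuned-28jan-2")

text = (
    "The new library branch opened on Saturday after two years of construction. "
    "It offers reading rooms, a children's section, and free evening workshops."
)
print(summarizer(text, max_length=48, min_length=8, do_sample=False)[0]["summary_text"])
```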
CrypticT1tan/DialoGPT-medium-harrypotter
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - autotrain - token-classification language: - en widget: - text: "I love AutoTrain 🤗" datasets: - ankleBowl/autotrain-data-lucy-light-control co2_eq_emissions: emissions: 0.5335980780308736 --- # Model Trained Using AutoTrain - Problem type: Entity Extraction - Model ID: 3122788375 - CO2 Emissions (in grams): 0.5336 ## Validation Metrics - Loss: 0.003 - Accuracy: 1.000 - Precision: 1.000 - Recall: 1.000 - F1: 1.000 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/ankleBowl/autotrain-lucy-light-control-3122788375 ``` Or Python API: ``` from transformers import AutoModelForTokenClassification, AutoTokenizer model = AutoModelForTokenClassification.from_pretrained("ankleBowl/autotrain-lucy-light-control-3122788375", use_auth_token=True) tokenizer = AutoTokenizer.from_pretrained("ankleBowl/autotrain-lucy-light-control-3122788375", use_auth_token=True) inputs = tokenizer("I love AutoTrain", return_tensors="pt") outputs = model(**inputs) ```
Crystal/distilbert-base-uncased-finetuned-squad
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - HalfCheetahBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: HalfCheetahBulletEnv-v0 type: HalfCheetahBulletEnv-v0 metrics: - type: mean_reward value: 827.20 +/- 108.15 name: mean_reward verified: false --- # **A2C** Agent playing **HalfCheetahBulletEnv-v0** This is a trained model of a **A2C** agent playing **HalfCheetahBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
Culmenus/XLMR-ENIS-finetuned-ner
[ "pytorch", "tensorboard", "xlm-roberta", "token-classification", "dataset:mim_gold_ner", "transformers", "generated_from_trainer", "license:agpl-3.0", "model-index", "autotrain_compatible" ]
token-classification
{ "architectures": [ "XLMRobertaForTokenClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
This is a mix of two models: Dreamshaper (70%) + AnythingBmix (30%). I am not a programmer and I have no idea what I am doing.
Culmenus/opus-mt-de-is-finetuned-de-to-is_35g65cc_2
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - autotrain - vision - image-classification datasets: - Lloviant/autotrain-data-ex-and-pt widget: - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg example_title: Tiger - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg example_title: Teapot - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg example_title: Palace co2_eq_emissions: emissions: 0.6202842405816136 --- # Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 3122688386 - CO2 Emissions (in grams): 0.6203 ## Validation Metrics - Loss: 1.338 - Accuracy: 0.571 - Macro F1: 0.389 - Micro F1: 0.571 - Weighted F1: 0.429 - Macro Precision: 0.333 - Micro Precision: 0.571 - Weighted Precision: 0.357 - Macro Recall: 0.500 - Micro Recall: 0.571 - Weighted Recall: 0.571
Culmenus/opus-mt-de-is-finetuned-de-to-is_ancc
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - autotrain - vision - image-classification datasets: - Lloviant/autotrain-data-ex-and-pt widget: - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg example_title: Tiger - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg example_title: Teapot - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg example_title: Palace co2_eq_emissions: emissions: 0.5722366196083666 --- # Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 3122688387 - CO2 Emissions (in grams): 0.5722 ## Validation Metrics - Loss: 1.749 - Accuracy: 0.571 - Macro F1: 0.444 - Micro F1: 0.571 - Weighted F1: 0.476 - Macro Precision: 0.417 - Micro Precision: 0.571 - Weighted Precision: 0.429 - Macro Recall: 0.500 - Micro Recall: 0.571 - Weighted Recall: 0.571
Culmenus/opus-mt-de-is-finetuned-de-to-is_ekkicc
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - autotrain - vision - image-classification datasets: - Lloviant/autotrain-data-ex-and-pt widget: - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg example_title: Tiger - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg example_title: Teapot - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg example_title: Palace co2_eq_emissions: emissions: 0.2158114227532694 --- # Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 3122688388 - CO2 Emissions (in grams): 0.2158 ## Validation Metrics - Loss: 1.818 - Accuracy: 0.000 - Macro F1: 0.000 - Micro F1: 0.000 - Weighted F1: 0.000 - Macro Precision: 0.000 - Micro Precision: 0.000 - Weighted Precision: 0.000 - Macro Recall: 0.000 - Micro Recall: 0.000 - Weighted Recall: 0.000
Culmenus/opus-mt-de-is-finetuned-de-to-is_nr2-finetuned-de-to-is_nr2
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - autotrain - vision - image-classification datasets: - Lloviant/autotrain-data-ex-and-pt widget: - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg example_title: Tiger - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg example_title: Teapot - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg example_title: Palace co2_eq_emissions: emissions: 0.7206152092702812 --- # Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 3122688389 - CO2 Emissions (in grams): 0.7206 ## Validation Metrics - Loss: 1.599 - Accuracy: 0.286 - Macro F1: 0.250 - Micro F1: 0.286 - Weighted F1: 0.286 - Macro Precision: 0.250 - Micro Precision: 0.286 - Weighted Precision: 0.286 - Macro Recall: 0.250 - Micro Recall: 0.286 - Weighted Recall: 0.286
Culmenus/opus-mt-de-is-finetuned-de-to-is_nr2
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- tags: - autotrain - vision - image-classification datasets: - Lloviant/autotrain-data-ex-and-pt widget: - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg example_title: Tiger - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg example_title: Teapot - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg example_title: Palace co2_eq_emissions: emissions: 0.42285127723587795 --- # Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 3122688390 - CO2 Emissions (in grams): 0.4229 ## Validation Metrics - Loss: 1.919 - Accuracy: 0.286 - Macro F1: 0.214 - Micro F1: 0.286 - Weighted F1: 0.184 - Macro Precision: 0.194 - Micro Precision: 0.286 - Weighted Precision: 0.167 - Macro Recall: 0.333 - Micro Recall: 0.286 - Weighted Recall: 0.286
CurtisASmith/GPT-JRT
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 --- Evaluate Nynorsk translation
CurtisBowser/DialoGPT-medium-sora-two
[ "pytorch", "conversational" ]
conversational
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - ultralyticsplus - yolov8 - ultralytics - yolo - vision - object-detection - pytorch - awesome-yolov8-models library_name: ultralytics library_version: 8.0.21 inference: false datasets: - keremberke/valorant-object-detection model-index: - name: keremberke/yolov8m-valorant-detection results: - task: type: object-detection dataset: type: keremberke/valorant-object-detection name: valorant-object-detection split: validation metrics: - type: precision # since [email protected] is not available on hf.co/metrics value: 0.96466 # min: 0.0 - max: 1.0 name: [email protected](box) --- <div align="center"> <img width="640" alt="keremberke/yolov8m-valorant-detection" src="https://huggingface.co/keremberke/yolov8m-valorant-detection/resolve/main/thumbnail.jpg"> </div> ### Supported Labels ``` ['dropped spike', 'enemy', 'planted spike', 'teammate'] ``` ### How to use - Install [ultralyticsplus](https://github.com/fcakyon/ultralyticsplus): ```bash pip install ultralyticsplus==0.0.23 ultralytics==8.0.21 ``` - Load model and perform prediction: ```python from ultralyticsplus import YOLO, render_result # load model model = YOLO('keremberke/yolov8m-valorant-detection') # set model parameters model.overrides['conf'] = 0.25 # NMS confidence threshold model.overrides['iou'] = 0.45 # NMS IoU threshold model.overrides['agnostic_nms'] = False # NMS class-agnostic model.overrides['max_det'] = 1000 # maximum number of detections per image # set image image = 'https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg' # perform inference results = model.predict(image) # observe results print(results[0].boxes) render = render_result(model=model, image=image, result=results[0]) render.show() ``` **More models available at: [awesome-yolov8-models](https://yolov8.xyz)**
Czapla/Rick
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Huggy library_name: ml-agents --- # **ppo** Agent playing **Huggy** This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub. ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy 2. Write your model_id: nikz/ppo-Huggy 3. Select your *.nn or *.onnx file 4. Click on "Watch the agent play" 👀
D3xter1922/distilbert-base-uncased-finetuned-cola
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - beans metrics: - accuracy model-index: - name: vit-model-beimer results: - task: name: Image Classification type: image-classification dataset: name: beans type: beans config: default split: validation args: default metrics: - name: Accuracy type: accuracy value: 0.9849624060150376 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-model-beimer This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 0.0637 - Accuracy: 0.9850 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.1394 | 3.85 | 500 | 0.0637 | 0.9850 | ### Framework versions - Transformers 4.26.0 - Pytorch 1.13.1+cu116 - Datasets 2.9.0 - Tokenizers 0.13.2
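The card above ends at the training summary without a usage section. Below is a minimal inference sketch; it assumes the standard `transformers` image-classification API, and the repository id and image path are placeholders rather than values taken from the card.

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

# Placeholder hub id -- substitute the actual repository of this fine-tuned checkpoint.
repo_id = "your-username/vit-model-beimer"

processor = AutoImageProcessor.from_pretrained(repo_id)
model = AutoModelForImageClassification.from_pretrained(repo_id)

# Illustrative local photo of a bean leaf (the model was fine-tuned on the beans dataset).
image = Image.open("bean_leaf.jpg")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

predicted_class = logits.argmax(-1).item()
print(model.config.id2label[predicted_class])
```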
D4RL1NG/yes
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-01-28T22:12:48Z
--- library_name: stable-baselines3 tags: - HalfCheetahBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: TQC results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: HalfCheetahBulletEnv-v0 type: HalfCheetahBulletEnv-v0 metrics: - type: mean_reward value: 2919.24 +/- 8.00 name: mean_reward verified: false --- # **TQC** Agent playing **HalfCheetahBulletEnv-v0** This is a trained model of a **TQC** agent playing **HalfCheetahBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
DARKVIP3R/DialoGPT-medium-Anakin
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - wer model-index: - name: whisper-large-v2-japanese-5k-steps results: [] datasets: - mozilla-foundation/common_voice_11_0 language: - ja --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # whisper-large-v2-japanese-5k-steps This model is a fine-tuned version of [openai/whisper-large-v2](https://huggingface.co/openai/whisper-large-v2) on the Japanese CommonVoice dataset (v11). It achieves the following results on the evaluation set: - Loss: 0.4200 - Wer: 0.7449 ## Model description This model is finetuned for 5000 steps for research purposes which means that the transcriptions might not be that satisfactory for users. ## Training and evaluation data - Training Data: CommonVoice (v11) train split - Validation Data: CommonVoice (v11) Validation split - Test Data: CommonVoice (v11) Test split ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 50 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 5000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.0111 | 7.63 | 1000 | 0.3210 | 0.7888 | | 0.0007 | 15.27 | 2000 | 0.3585 | 0.7478 | | 0.0003 | 22.9 | 3000 | 0.3937 | 0.7432 | | 0.0002 | 30.53 | 4000 | 0.4123 | 0.7443 | | 0.0002 | 38.17 | 5000 | 0.4200 | 0.7449 | ### Transcription ```python from datasets import load_dataset, Audio import torch from transformers import WhisperProcessor, WhisperForConditionalGeneration # device device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # load the model processor = WhisperProcessor.from_pretrained("clu-ling/whisper-large-v2-japanese-5k-steps") model = WhisperForConditionalGeneration.from_pretrained("clu-ling/whisper-large-v2-japanese-5k-steps").to(device) forced_decoder_ids = processor.get_decoder_prompt_ids(language="ja", task="transcribe") # load the dataset commonvoice_eval = load_dataset("mozilla-foundation/common_voice_11_0", "ja", split="validation", streaming=True) commonvoice_eval = commonvoice_eval.cast_column("audio", Audio(sampling_rate=16000)) sample = next(iter(commonvoice_eval))["audio"] # features and generate token ids input_features = processor(sample["array"], sampling_rate=sample["sampling_rate"], return_tensors="pt").input_features predicted_ids = model.generate(input_features.to(device), forced_decoder_ids=forced_decoder_ids) # decode transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True) print(transcription) ``` ### Evaluation: Evaluates this model on `mozilla-foundation/common_voice_11_0` test split.
```python from transformers.models.whisper.english_normalizer import BasicTextNormalizer from datasets import load_dataset, Audio import evaluate import torch import re from transformers import WhisperProcessor, WhisperForConditionalGeneration # device device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # metric wer_metric = evaluate.load("wer") # text normalizer used by normalize() and map_wer() below whisper_norm = BasicTextNormalizer() # model processor = WhisperProcessor.from_pretrained("clu-ling/whisper-large-v2-japanese-5k-steps") model = WhisperForConditionalGeneration.from_pretrained("clu-ling/whisper-large-v2-japanese-5k-steps") # dataset dataset = load_dataset("mozilla-foundation/common_voice_11_0", "ja", split="test", ) #cache_dir=args.cache_dir dataset = dataset.cast_column("audio", Audio(sampling_rate=16000)) #for debugging: it gets some examples #dataset = dataset.shard(num_shards=7000, index=0) #print(dataset) def normalize(batch): batch["gold_text"] = whisper_norm(batch['sentence']) return batch def map_wer(batch): model.to(device) forced_decoder_ids = processor.get_decoder_prompt_ids(language = "ja", task = "transcribe") inputs = processor(batch["audio"]["array"], sampling_rate=batch["audio"]["sampling_rate"], return_tensors="pt").input_features with torch.no_grad(): generated_ids = model.generate(inputs=inputs.to(device), forced_decoder_ids=forced_decoder_ids) transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] batch["predicted_text"] = whisper_norm(transcription) return batch # process GOLD text processed_dataset = dataset.map(normalize) # get predictions predicted = processed_dataset.map(map_wer) # word error rate wer = wer_metric.compute(references=predicted['gold_text'], predictions=predicted['predicted_text']) wer = round(100 * wer, 2) print("WER:", wer) ``` ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.1 - Datasets 2.8.1.dev0 - Tokenizers 0.13.2
DHBaek/gpt2-stackoverflow-question-contents-generator
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 283.52 +/- 12.47 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
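The usage section above is left as a TODO. A minimal loading sketch with `huggingface_sb3` is shown below; the repository id and zip filename are assumptions, so check the repository's file listing for the actual artifact name.

```python
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# Assumed repo id and filename -- replace with the actual repository and checkpoint archive.
checkpoint = load_from_hub(repo_id="<user>/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)

# Roll out one episode to sanity-check the loaded policy.
# Gym's classic API (reset() -> obs, step() -> 4-tuple) is assumed here.
env = gym.make("LunarLander-v2")
obs = env.reset()
done = False
while not done:
    action, _states = model.predict(obs, deterministic=True)
    obs, reward, done, info = env.step(action)
env.close()
```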
DJSammy/bert-base-danish-uncased_BotXO-ai
[ "pytorch", "jax", "da", "dataset:common_crawl", "dataset:wikipedia", "transformers", "bert", "masked-lm", "license:cc-by-4.0", "fill-mask" ]
fill-mask
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
--- tags: - Pixelcopter-PLE-v0 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-PixelCopterLocalv1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Pixelcopter-PLE-v0 type: Pixelcopter-PLE-v0 metrics: - type: mean_reward value: 64.10 +/- 41.10 name: mean_reward verified: false --- # **Reinforce** Agent playing **Pixelcopter-PLE-v0** This is a trained model of a **Reinforce** agent playing **Pixelcopter-PLE-v0** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
DJStomp/TestingSalvoNET
[ "transformers" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- language: - en license: apache-2.0 tags: - generated_from_trainer datasets: - glue metrics: - accuracy - f1 model-index: - name: distilbert_add_GLUE_Experiment_logit_kd_mrpc results: - task: name: Text Classification type: text-classification dataset: name: GLUE MRPC type: glue config: mrpc split: validation args: mrpc metrics: - name: Accuracy type: accuracy value: 0.3161764705882353 - name: F1 type: f1 value: 0.0 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert_add_GLUE_Experiment_logit_kd_mrpc This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE MRPC dataset. It achieves the following results on the evaluation set: - Loss: 0.5207 - Accuracy: 0.3162 - F1: 0.0 - Combined Score: 0.1581 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 256 - eval_batch_size: 256 - seed: 10 - distributed_type: multi-GPU - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Combined Score | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:--------------:| | 0.564 | 1.0 | 15 | 0.5300 | 0.3162 | 0.0 | 0.1581 | | 0.533 | 2.0 | 30 | 0.5323 | 0.3162 | 0.0 | 0.1581 | | 0.5302 | 3.0 | 45 | 0.5290 | 0.3162 | 0.0 | 0.1581 | | 0.5312 | 4.0 | 60 | 0.5289 | 0.3162 | 0.0 | 0.1581 | | 0.527 | 5.0 | 75 | 0.5306 | 0.3162 | 0.0 | 0.1581 | | 0.5229 | 6.0 | 90 | 0.5207 | 0.3162 | 0.0 | 0.1581 | | 0.5088 | 7.0 | 105 | 0.5358 | 0.5539 | 0.5806 | 0.5673 | | 0.5003 | 8.0 | 120 | 0.5299 | 0.4902 | 0.4611 | 0.4757 | | 0.4825 | 9.0 | 135 | 0.5323 | 0.3627 | 0.1824 | 0.2726 | | 0.4628 | 10.0 | 150 | 0.5373 | 0.5196 | 0.5377 | 0.5287 | | 0.451 | 11.0 | 165 | 0.5513 | 0.5417 | 0.5854 | 0.5635 | ### Framework versions - Transformers 4.26.0 - Pytorch 1.14.0a0+410ce96 - Datasets 2.9.0 - Tokenizers 0.13.2
DSI/TweetBasedSA
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - wer model-index: - name: whisper-large-v2-arabic-5k-steps results: [] datasets: - mozilla-foundation/common_voice_11_0 language: - ar --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # whisper-large-v2-arabic-5k-steps This model is a fine-tuned version of [openai/whisper-large-v2](https://huggingface.co/openai/whisper-large-v2) on the Arabic CommonVoice dataset (v11). It achieves the following results on the evaluation set: - Loss: 0.3434 - Wer: 0.4239 ## Model description This model is finetuned for 5000 steps for research purposes which means that the transcriptions might not be that satisfactory for users. ## Training and evaluation data - Training Data: CommonVoice (v11) train split - Validation Data: CommonVoice (v11) Validation split - Test Data: CommonVoice (v11) Test split ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 50 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 5000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.1638 | 1.78 | 1000 | 0.2295 | 0.4410 | | 0.0587 | 3.57 | 2000 | 0.2337 | 0.4272 | | 0.0125 | 5.35 | 3000 | 0.2745 | 0.4208 | | 0.004 | 7.13 | 4000 | 0.3124 | 0.4252 | | 0.0016 | 8.91 | 5000 | 0.3434 | 0.4239 | ### Transcription: ```python from datasets import load_dataset, Audio import torch from transformers import WhisperProcessor, WhisperForConditionalGeneration # device device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # load the model processor = WhisperProcessor.from_pretrained("clu-ling/whisper-large-v2-arabic-5k-steps") model = WhisperForConditionalGeneration.from_pretrained("clu-ling/whisper-large-v2-arabic-5k-steps").to(device) forced_decoder_ids = processor.get_decoder_prompt_ids(language="ar", task="transcribe") # load the dataset commonvoice_eval = load_dataset("mozilla-foundation/common_voice_11_0", "ar", split="validation", streaming=True) commonvoice_eval = commonvoice_eval.cast_column("audio", Audio(sampling_rate=16000)) sample = next(iter(commonvoice_eval))["audio"] # features and generate token ids input_features = processor(sample["array"], sampling_rate=sample["sampling_rate"], return_tensors="pt").input_features predicted_ids = model.generate(input_features.to(device), forced_decoder_ids=forced_decoder_ids) # decode transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0] print("Transcription:", transcription) Transcription: عمي هو أخو أبي. ``` ### Evaluation: Evaluates this model on `mozilla-foundation/common_voice_11_0` test split. 
```python import pyarabic.araby as araby from transformers.models.whisper.english_normalizer import BasicTextNormalizer from datasets import load_dataset, Audio import evaluate import torch import re from transformers import WhisperProcessor, WhisperForConditionalGeneration # device device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # metric wer_metric = evaluate.load("wer") # model processor = WhisperProcessor.from_pretrained("clu-ling/whisper-large-v2-arabic-5k-steps") model = WhisperForConditionalGeneration.from_pretrained("clu-ling/whisper-large-v2-arabic-5k-steps") # dataset dataset = load_dataset("mozilla-foundation/common_voice_11_0", "ar", split="test", ) #cache_dir=args.cache_dir dataset = dataset.cast_column("audio", Audio(sampling_rate=16000)) #for debuggings: it gets two examples #dataset = dataset.shard(num_shards=10000, index=0) #print(dataset) def clean_text(text): """Normalizes TRANSCRIPT""" text = re.sub(r'[\,\?\.\!\-\;\:\"\“\%\٪\‘\”\�\«\»\،\.\:\؟\؛\*\>\<]', '', text) + " " # special characters text = re.sub(r'http\S+', '', text) + " " # links text = re.sub(r'[\[\]\(\)\-\/\{\}]', '', text) + " " # brackets text = re.sub(r'\s+', ' ', text) + " " # extra white space text = araby.strip_diacritics(text) # remove diacrirics return text.strip() def normalize(batch): """Normalizes GOLD""" #batch["gold_text"] = whisper_norm(batch['sentence']) batch["gold_text"] = clean_text(batch['sentence']) return batch def map_wer(batch): model.to(device) forced_decoder_ids = processor.get_decoder_prompt_ids(language = "ar", task = "transcribe") inputs = processor(batch["audio"]["array"], sampling_rate=batch["audio"]["sampling_rate"], return_tensors="pt").input_features with torch.no_grad(): generated_ids = model.generate(inputs=inputs.to(device), forced_decoder_ids=forced_decoder_ids) transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] batch["predicted_text"] = clean_text(transcription) return batch # process GOLD text processed_dataset = dataset.map(normalize) # get predictions predicted = processed_dataset.map(map_wer) # word error rate wer = wer_metric.compute(references=predicted['gold_text'], predictions=predicted['predicted_text']) wer = round(100 * wer, 2) print("WER:", wer) ``` ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.1 - Datasets 2.8.1.dev0 - Tokenizers 0.13.2
alexandrainst/da-hatespeech-detection-base
[ "pytorch", "tf", "safetensors", "bert", "text-classification", "da", "transformers", "license:cc-by-sa-4.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,719
null
--- language: - en license: apache-2.0 tags: - generated_from_trainer datasets: - glue metrics: - accuracy model-index: - name: distilbert_add_GLUE_Experiment_logit_kd_qnli_256 results: - task: name: Text Classification type: text-classification dataset: name: GLUE QNLI type: glue config: qnli split: validation args: qnli metrics: - name: Accuracy type: accuracy value: 0.5874061870766978 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert_add_GLUE_Experiment_logit_kd_qnli_256 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE QNLI dataset. It achieves the following results on the evaluation set: - Loss: 0.3989 - Accuracy: 0.5874 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 256 - eval_batch_size: 256 - seed: 10 - distributed_type: multi-GPU - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.4156 | 1.0 | 410 | 0.4111 | 0.5054 | | 0.4078 | 2.0 | 820 | 0.4018 | 0.5799 | | 0.3962 | 3.0 | 1230 | 0.3989 | 0.5874 | | 0.3899 | 4.0 | 1640 | 0.4018 | 0.5867 | | 0.3851 | 5.0 | 2050 | 0.4032 | 0.5799 | | 0.3802 | 6.0 | 2460 | 0.4118 | 0.5728 | | 0.3762 | 7.0 | 2870 | 0.4093 | 0.5718 | | 0.3717 | 8.0 | 3280 | 0.4100 | 0.5737 | ### Framework versions - Transformers 4.26.0 - Pytorch 1.14.0a0+410ce96 - Datasets 2.9.0 - Tokenizers 0.13.2
DataikuNLP/average_word_embeddings_glove.6B.300d
[ "arxiv:1908.10084", "sentence-transformers", "feature-extraction", "sentence-similarity", "license:apache-2.0" ]
sentence-similarity
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-01-29T01:53:17Z
--- tags: - autotrain - vision - image-classification license: mit widget: - src: https://files.catbox.moe/72xdjy.png example_title: Furry Avatar #1 - src: https://files.catbox.moe/22bao8.jpg example_title: Furry Avatar #2 - src: https://files.catbox.moe/xahs5m.png example_title: Normal Animal Avatar #1 - src: https://files.catbox.moe/6zvcpu.png example_title: Normal Animal Avatar #2 - src: https://files.catbox.moe/gcltc9.png example_title: Kemonomimi Avatar #1 - src: https://files.catbox.moe/w4vcoc.png example_title: Kemonomimi Avatar #2 - src: https://files.catbox.moe/ujfzv0.png example_title: Human Avatar #1 - src: https://files.catbox.moe/yxx1qz.jpg example_title: Human Avatar #2 - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/cat-1.jpg example_title: Normal Cat :3 co2_eq_emissions: emissions: 2.8752228959859316 --- This detects furry images, mostly profile pictures, although it may be able to detect any sort of furry picture (I haven't tried it, though). A short usage sketch follows the statistics at the end of this card. # Dataset Info This was trained on scraped pfp images from Mastodon, with some non-pfp images thrown in for "balancing" (i.e. ensuring pokemon, kemonomimi (catgirls/foxgirls/etc), and normal animals weren't classified as 'furry') **Furry images**: 551 **Non-furry images**: 641 # Disclaimer Please do not ruin this by using it to harass anyone. This is *not* intended to be used for targeted harassment, and I will explicitly condemn any use that attempts to do so. If you're wondering why I made this public in the first place: I believe in freedom of *information* - this image classification model has various perfectly valid uses, and it's kinda useless to keep it private. # Statistics ## Model Trained Using AutoTrain - Problem type: Binary Classification - Model ID: 2890884434 - CO2 Emissions (in grams): 2.8752 ## Validation Metrics - Loss: 0.175 - Accuracy: 0.933 - Precision: 0.938 - Recall: 0.938 - AUC: 0.975 - F1: 0.938
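The card explains what the classifier detects but not how to call it. A short sketch using the `transformers` image-classification pipeline follows; the hub id is a placeholder built from the AutoTrain model ID above, and the avatar filename is illustrative.

```python
from PIL import Image
from transformers import pipeline

# Placeholder hub id -- replace with the actual repository of this AutoTrain binary classifier.
detector = pipeline("image-classification", model="<user>/autotrain-furry-detector-2890884434")

avatar = Image.open("avatar.png")  # illustrative profile picture to score
for prediction in detector(avatar):
    print(prediction["label"], round(prediction["score"], 3))
```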
DataikuNLP/camembert-base
[ "pytorch", "tf", "camembert", "fill-mask", "fr", "dataset:oscar", "arxiv:1911.03894", "transformers", "license:mit", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "CamembertForMaskedLM" ], "model_type": "camembert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
2023-01-29T01:26:35Z
--- language: - en license: apache-2.0 tags: - generated_from_trainer datasets: - glue metrics: - accuracy - f1 model-index: - name: mobilebert_add_GLUE_Experiment_logit_kd_qqp results: - task: name: Text Classification type: text-classification dataset: name: GLUE QQP type: glue config: qqp split: validation args: qqp metrics: - name: Accuracy type: accuracy value: 0.756987385604749 - name: F1 type: f1 value: 0.604929832321364 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mobilebert_add_GLUE_Experiment_logit_kd_qqp This model is a fine-tuned version of [google/mobilebert-uncased](https://huggingface.co/google/mobilebert-uncased) on the GLUE QQP dataset. It achieves the following results on the evaluation set: - Loss: 0.8079 - Accuracy: 0.7570 - F1: 0.6049 - Combined Score: 0.6810 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 128 - eval_batch_size: 128 - seed: 10 - distributed_type: multi-GPU - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Combined Score | |:--------------------------:|:-----:|:-----:|:---------------:|:--------:|:------:|:--------------:| | 1.2837 | 1.0 | 2843 | 1.2201 | 0.6318 | 0.0 | 0.3159 | | 1.076 | 2.0 | 5686 | 0.8477 | 0.7443 | 0.5855 | 0.6649 | | 0.866 | 3.0 | 8529 | 0.8217 | 0.7518 | 0.5924 | 0.6721 | | 0.8317 | 4.0 | 11372 | 0.8136 | 0.7565 | 0.6243 | 0.6904 | | 0.8122 | 5.0 | 14215 | 0.8126 | 0.7588 | 0.6352 | 0.6970 | | 0.799 | 6.0 | 17058 | 0.8079 | 0.7570 | 0.6049 | 0.6810 | | 386581134871678353408.0000 | 7.0 | 19901 | nan | 0.6318 | 0.0 | 0.3159 | | 0.0 | 8.0 | 22744 | nan | 0.6318 | 0.0 | 0.3159 | | 0.0 | 9.0 | 25587 | nan | 0.6318 | 0.0 | 0.3159 | | 0.0 | 10.0 | 28430 | nan | 0.6318 | 0.0 | 0.3159 | | 0.0 | 11.0 | 31273 | nan | 0.6318 | 0.0 | 0.3159 | ### Framework versions - Transformers 4.26.0 - Pytorch 1.14.0a0+410ce96 - Datasets 2.9.0 - Tokenizers 0.13.2
DataikuNLP/paraphrase-albert-small-v2
[ "pytorch", "albert", "arxiv:1908.10084", "sentence-transformers", "feature-extraction", "sentence-similarity", "transformers", "license:apache-2.0" ]
sentence-similarity
{ "architectures": [ "AlbertModel" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
628
null
--- language: - en license: apache-2.0 tags: - generated_from_trainer datasets: - glue metrics: - accuracy model-index: - name: distilbert_add_GLUE_Experiment_logit_kd_rte_384 results: - task: name: Text Classification type: text-classification dataset: name: GLUE RTE type: glue config: rte split: validation args: rte metrics: - name: Accuracy type: accuracy value: 0.4729241877256318 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert_add_GLUE_Experiment_logit_kd_rte_384 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE RTE dataset. It achieves the following results on the evaluation set: - Loss: 0.4233 - Accuracy: 0.4729 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 256 - eval_batch_size: 256 - seed: 10 - distributed_type: multi-GPU - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.4263 | 1.0 | 10 | 0.4273 | 0.4729 | | 0.4195 | 2.0 | 20 | 0.4268 | 0.4729 | | 0.4189 | 3.0 | 30 | 0.4236 | 0.4729 | | 0.417 | 4.0 | 40 | 0.4250 | 0.4729 | | 0.4192 | 5.0 | 50 | 0.4249 | 0.4729 | | 0.417 | 6.0 | 60 | 0.4238 | 0.4729 | | 0.4182 | 7.0 | 70 | 0.4233 | 0.4729 | | 0.4188 | 8.0 | 80 | 0.4235 | 0.4729 | | 0.4174 | 9.0 | 90 | 0.4237 | 0.4729 | | 0.4169 | 10.0 | 100 | 0.4244 | 0.4729 | | 0.4188 | 11.0 | 110 | 0.4237 | 0.4729 | | 0.417 | 12.0 | 120 | 0.4237 | 0.4729 | ### Framework versions - Transformers 4.26.0 - Pytorch 1.14.0a0+410ce96 - Datasets 2.9.0 - Tokenizers 0.13.2
DataikuNLP/paraphrase-multilingual-MiniLM-L12-v2
[ "pytorch", "bert", "arxiv:1908.10084", "sentence-transformers", "feature-extraction", "sentence-similarity", "transformers", "license:apache-2.0" ]
sentence-similarity
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,517
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - wer model-index: - name: Vin16-P3 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Vin16-P3 This model is a fine-tuned version of [HuyenNguyen/Vin11-P3](https://huggingface.co/HuyenNguyen/Vin11-P3) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.4000 - Wer: 25.7994 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 150 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:-------:| | 0.3252 | 0.27 | 50 | 0.3806 | 24.3160 | | 0.2973 | 0.53 | 100 | 0.3923 | 24.8214 | | 0.2815 | 0.8 | 150 | 0.4000 | 25.7994 | ### Framework versions - Transformers 4.27.0.dev0 - Pytorch 1.13.1+cu116 - Datasets 2.9.0 - Tokenizers 0.13.2
Davlan/bert-base-multilingual-cased-finetuned-igbo
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
15
null
--- language: - en license: apache-2.0 tags: - generated_from_trainer datasets: - glue metrics: - accuracy model-index: - name: distilbert_add_GLUE_Experiment_logit_kd_mnli_384 results: - task: name: Text Classification type: text-classification dataset: name: GLUE MNLI type: glue config: mnli split: validation_matched args: mnli metrics: - name: Accuracy type: accuracy value: 0.576993490642799 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert_add_GLUE_Experiment_logit_kd_mnli_384 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE MNLI dataset. It achieves the following results on the evaluation set: - Loss: 0.5304 - Accuracy: 0.5770 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 256 - eval_batch_size: 256 - seed: 10 - distributed_type: multi-GPU - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:| | 0.6035 | 1.0 | 1534 | 0.5764 | 0.4805 | | 0.5667 | 2.0 | 3068 | 0.5578 | 0.5171 | | 0.5542 | 3.0 | 4602 | 0.5520 | 0.5243 | | 0.5447 | 4.0 | 6136 | 0.5460 | 0.5422 | | 0.5338 | 5.0 | 7670 | 0.5387 | 0.5671 | | 0.5172 | 6.0 | 9204 | 0.5304 | 0.5781 | | 0.4993 | 7.0 | 10738 | 0.5333 | 0.5847 | | 0.482 | 8.0 | 12272 | 0.5317 | 0.5901 | | 0.4654 | 9.0 | 13806 | 0.5323 | 0.5949 | | 0.4504 | 10.0 | 15340 | 0.5368 | 0.5957 | | 0.4369 | 11.0 | 16874 | 0.5405 | 0.5980 | ### Framework versions - Transformers 4.26.0 - Pytorch 1.14.0a0+410ce96 - Datasets 2.9.0 - Tokenizers 0.13.2
Davlan/bert-base-multilingual-cased-finetuned-wolof
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers datasets: - allenai/nllb --- # Ramos-Ramos/xlm-roberta-base-en-tl-0-4000 This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('Ramos-Ramos/xlm-roberta-base-en-tl-0-4000') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('Ramos-Ramos/xlm-roberta-base-en-tl-0-4000') model = AutoModel.from_pretrained('Ramos-Ramos/xlm-roberta-base-en-tl-0-4000') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=Ramos-Ramos/xlm-roberta-base-en-tl-0-4000) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 12406 with parameters: ``` {'batch_size': 64, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.MSELoss.MSELoss` Parameters of the fit()-Method: ``` { "epochs": 5, "evaluation_steps": 1000, "evaluator": "sentence_transformers.evaluation.SequentialEvaluator.SequentialEvaluator", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "eps": 1e-06, "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 10000, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: XLMRobertaModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
Davlan/distilbert-base-multilingual-cased-ner-hrl
[ "pytorch", "tf", "distilbert", "token-classification", "transformers", "autotrain_compatible", "has_space" ]
token-classification
{ "architectures": [ "DistilBertForTokenClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
123,856
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers datasets: - allenai/nllb --- # Ramos-Ramos/xlm-roberta-base-en-tl-0-6000 This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('Ramos-Ramos/xlm-roberta-base-en-tl-0-6000') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('Ramos-Ramos/xlm-roberta-base-en-tl-0-6000') model = AutoModel.from_pretrained('Ramos-Ramos/xlm-roberta-base-en-tl-0-6000') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=Ramos-Ramos/xlm-roberta-base-en-tl-0-6000) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 12406 with parameters: ``` {'batch_size': 64, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.MSELoss.MSELoss` Parameters of the fit()-Method: ``` { "epochs": 5, "evaluation_steps": 1000, "evaluator": "sentence_transformers.evaluation.SequentialEvaluator.SequentialEvaluator", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "eps": 1e-06, "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 10000, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: XLMRobertaModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
Davlan/xlm-roberta-base-finetuned-english
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: mit datasets: - DarwinAnim8or/greentext language: - en tags: - fun - greentext widget: - text: ">be me" example_title: "be me" co2_eq_emissions: emissions: 60 source: "https://mlco2.github.io/impact/#compute" training_type: "fine-tuning" geographical_location: "Oregon, USA" hardware_used: "1 T4, Google Colab" --- # GPT-Greentext-355m A finetuned version of [GPT2-Medium](https://huggingface.co/gpt2-medium) on the 'greentext' dataset. (Linked above) A demo is available [here](https://huggingface.co/spaces/DarwinAnim8or/GPT-Greentext-Playground) The demo playground is recommended over the inference box on the right. The largest model in this series is located here: [GPT-Greentext-1.5b](https://huggingface.co/DarwinAnim8or/GPT-Greentext-1.5b) # Training Procedure This was trained on the 'greentext' dataset, using the "HappyTransformers" library on Google Colab. This model was trained for 15 epochs with learning rate 1e-2. # Biases & Limitations This likely contains the same biases and limitations as the original GPT2 that it is based on, and additionally heavy biases from the greentext dataset. It likely will generate offensive output. # Intended Use This model is meant for fun, nothing else. # Sample Use ```python #Import model: from happytransformer import HappyGeneration happy_gen = HappyGeneration("GPT2", "DarwinAnim8or/GPT-Greentext-355m") #Set generation settings: from happytransformer import GENSettings args_top_k = GENSettings(no_repeat_ngram_size=3, do_sample=True, top_k=80, temperature=0.8, max_length=150, early_stopping=False) #Generate a response: result = happy_gen.generate_text(""">be me >""", args=args_top_k) print(result) print(result.text) ```
Davlan/xlm-roberta-base-finetuned-luganda
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
null
--- license: creativeml-openrail-m tags: - coreml - stable-diffusion - text-to-image --- # Core ML Converted Model: - This model was converted to [Core ML for use on Apple Silicon devices](https://github.com/apple/ml-stable-diffusion). Conversion instructions can be found [here](https://github.com/godly-devotion/MochiDiffusion/wiki/How-to-convert-ckpt-or-safetensors-files-to-Core-ML).<br> - Provide the model to an app such as Mochi Diffusion [Github](https://github.com/godly-devotion/MochiDiffusion) - [Discord](https://discord.gg/x2kartzxGv) to generate images.<br> - `split_einsum` version is compatible with all compute unit options including Neural Engine.<br> - `original` version is only compatible with CPU & GPU option.<br> - Custom resolution versions are tagged accordingly.<br> - `vae` tagged files have a vae embedded into the model.<br> - Descriptions are posted as-is from original model source. Not all features and/or results may be available in CoreML format.<br> - This model was converted with `vae-encoder` for i2i. - Models that are 32 bit will have "fp32" in the filename. # Note: Some models do not have the [unet split into chunks](https://github.com/apple/ml-stable-diffusion#-converting-models-to-core-ml). # Elldreth's Vivid Mix: Source(s): [CivitAI](https://civitai.com/models/2747/elldreths-vivid-mix) This mixed model is a combination of my all-time favorites AND new-found favorites, including a very popular anime model mixed with Zeipher's F222, Dreamlike, and H&A's awesome 3DKX_1.0b! Lastly, to top it off I used howder's jomad model. Every single model in this mix are great on their own. This mix allows you to take advantage of combined concepts and produces some great images. What's it good at? Realistic portraits Stylized characters Landscapes Fantasy Sci-Fi Anime Horror It's an all-around easy-to-prompt general purpose semi-realistic to realistic model that cranks out some really nice images. No trigger words required. All models were scanned prior to mixing and totally safe. So what's the difference between Vivid and all my other models? This model adds a lot more detail and realism to the images created with it and not just with portraits but landscapes as well. The other thing this model is better at is taking Textual Inversion embeddings. Lucid and Retro are both very resistant to TI Embeddings but Vivid is transformed very easily with a good embedding. What are you waiting for? Go get some great results from simple prompts. What's new in v2.0? Wow wow wow.. two big model releases have kept me busy testing and prompting. F222 was replaced by Hassan's newest model release The new H&A 3DKX Update replaced the older version wavymulder's portrait+ was added Dreamlike was udpated in the mix as well The end result is a lot more realistic and vivid outcome. I used the same prompt to generate the new preview images as were used in v1.0.
Davlan/xlm-roberta-base-finetuned-naija
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
2023-01-29T03:05:38Z
--- language: - en license: apache-2.0 tags: - generated_from_trainer datasets: - glue metrics: - accuracy model-index: - name: distilbert_add_GLUE_Experiment_logit_kd_wnli_96 results: - task: name: Text Classification type: text-classification dataset: name: GLUE WNLI type: glue config: wnli split: validation args: wnli metrics: - name: Accuracy type: accuracy value: 0.5633802816901409 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert_add_GLUE_Experiment_logit_kd_wnli_96 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE WNLI dataset. It achieves the following results on the evaluation set: - Loss: 0.3442 - Accuracy: 0.5634 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 256 - eval_batch_size: 256 - seed: 10 - distributed_type: multi-GPU - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.3478 | 1.0 | 3 | 0.3444 | 0.5634 | | 0.3472 | 2.0 | 6 | 0.3445 | 0.5634 | | 0.3467 | 3.0 | 9 | 0.3444 | 0.5634 | | 0.3476 | 4.0 | 12 | 0.3442 | 0.5634 | | 0.3476 | 5.0 | 15 | 0.3442 | 0.5634 | | 0.3471 | 6.0 | 18 | 0.3446 | 0.5634 | | 0.3473 | 7.0 | 21 | 0.3449 | 0.5634 | | 0.3471 | 8.0 | 24 | 0.3451 | 0.5634 | | 0.3477 | 9.0 | 27 | 0.3452 | 0.5634 | | 0.3469 | 10.0 | 30 | 0.3451 | 0.5634 | ### Framework versions - Transformers 4.26.0 - Pytorch 1.14.0a0+410ce96 - Datasets 2.9.0 - Tokenizers 0.13.2
Davlan/xlm-roberta-base-finetuned-somali
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- language: - en license: apache-2.0 tags: - generated_from_trainer datasets: - glue metrics: - accuracy model-index: - name: distilbert_add_GLUE_Experiment_logit_kd_mnli_96 results: - task: name: Text Classification type: text-classification dataset: name: GLUE MNLI type: glue config: mnli split: validation_matched args: mnli metrics: - name: Accuracy type: accuracy value: 0.5239015459723352 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert_add_GLUE_Experiment_logit_kd_mnli_96 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE MNLI dataset. It achieves the following results on the evaluation set: - Loss: 0.5576 - Accuracy: 0.5239 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 256 - eval_batch_size: 256 - seed: 10 - distributed_type: multi-GPU - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:| | 0.624 | 1.0 | 1534 | 0.6178 | 0.3605 | | 0.6176 | 2.0 | 3068 | 0.6138 | 0.3767 | | 0.6139 | 3.0 | 4602 | 0.6112 | 0.3822 | | 0.6104 | 4.0 | 6136 | 0.6071 | 0.3977 | | 0.6027 | 5.0 | 7670 | 0.5978 | 0.4091 | | 0.5958 | 6.0 | 9204 | 0.6104 | 0.4151 | | 0.5877 | 7.0 | 10738 | 0.5963 | 0.4517 | | 0.5787 | 8.0 | 12272 | 0.6054 | 0.4627 | | 0.5711 | 9.0 | 13806 | 0.5753 | 0.4905 | | 0.5641 | 10.0 | 15340 | 0.5713 | 0.4987 | | 0.5583 | 11.0 | 16874 | 0.5645 | 0.5115 | | 0.5535 | 12.0 | 18408 | 0.5646 | 0.5117 | | 0.549 | 13.0 | 19942 | 0.5692 | 0.5176 | | 0.5456 | 14.0 | 21476 | 0.5613 | 0.5220 | | 0.5425 | 15.0 | 23010 | 0.5584 | 0.5302 | | 0.5399 | 16.0 | 24544 | 0.5641 | 0.5252 | | 0.5375 | 17.0 | 26078 | 0.5628 | 0.5260 | | 0.5353 | 18.0 | 27612 | 0.5659 | 0.5200 | | 0.533 | 19.0 | 29146 | 0.5676 | 0.5310 | | 0.5311 | 20.0 | 30680 | 0.5563 | 0.5323 | | 0.5291 | 21.0 | 32214 | 0.5682 | 0.5250 | | 0.5274 | 22.0 | 33748 | 0.5661 | 0.5282 | | 0.5255 | 23.0 | 35282 | 0.5673 | 0.5325 | | 0.5236 | 24.0 | 36816 | 0.5563 | 0.5416 | | 0.5219 | 25.0 | 38350 | 0.5703 | 0.5290 | ### Framework versions - Transformers 4.26.0 - Pytorch 1.14.0a0+410ce96 - Datasets 2.9.0 - Tokenizers 0.13.2
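As with the WNLI run above, only hyperparameters and metrics are reported here. The sketch below shows one way the validation_matched accuracy could be spot-checked with the datasets library the card references; the repository path is an assumption, and only a small slice of the split is scored to keep the example quick.

```python
import torch
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_id = "distilbert_add_GLUE_Experiment_logit_kd_mnli_96"  # assumed repository path
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id).eval()

# The card evaluates on the matched validation split of GLUE MNLI.
dataset = load_dataset("glue", "mnli", split="validation_matched[:64]")

correct = 0
for example in dataset:
    inputs = tokenizer(
        example["premise"], example["hypothesis"], return_tensors="pt", truncation=True
    )
    with torch.no_grad():
        pred = model(**inputs).logits.argmax(dim=-1).item()
    correct += int(pred == example["label"])

print(f"accuracy on slice: {correct / len(dataset):.3f}")
```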
Davlan/xlm-roberta-base-sadilar-ner
[ "pytorch", "xlm-roberta", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "XLMRobertaForTokenClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers datasets: - allenai/nllb --- # Ramos-Ramos/xlm-roberta-base-en-tl-0-12000 This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('Ramos-Ramos/xlm-roberta-base-en-tl-0-12000') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('Ramos-Ramos/xlm-roberta-base-en-tl-0-12000') model = AutoModel.from_pretrained('Ramos-Ramos/xlm-roberta-base-en-tl-0-12000') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=Ramos-Ramos/xlm-roberta-base-en-tl-0-12000) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 12406 with parameters: ``` {'batch_size': 64, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.MSELoss.MSELoss` Parameters of the fit()-Method: ``` { "epochs": 5, "evaluation_steps": 1000, "evaluator": "sentence_transformers.evaluation.SequentialEvaluator.SequentialEvaluator", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "eps": 1e-06, "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 10000, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: XLMRobertaModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
Davlan/xlm-roberta-large-ner-hrl
[ "pytorch", "tf", "xlm-roberta", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "XLMRobertaForTokenClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1322
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - wer model-index: - name: Vin17-P3 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Vin17-P3 This model is a fine-tuned version of [HuyenNguyen/Vin16-P3](https://huggingface.co/HuyenNguyen/Vin16-P3) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.3397 - Wer: 22.4151 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 150 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:-------:| | 0.1738 | 0.29 | 50 | 0.3611 | 23.1843 | | 0.1628 | 0.57 | 100 | 0.3451 | 22.7118 | | 0.1627 | 0.86 | 150 | 0.3397 | 22.4151 | ### Framework versions - Transformers 4.27.0.dev0 - Pytorch 1.13.1+cu116 - Datasets 2.9.0 - Tokenizers 0.13.2
DeadBeast/roberta-base-pretrained-mr-2
[ "pytorch", "jax", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
# Stable Diffusion v2-1 Custom Implementation Custom implementation of the Stable Diffusion v2.1 base model for Glowforge. Forked from the [Stable Diffusion v2.1 base model](https://huggingface.co/stabilityai/stable-diffusion-2-1-base) developed by Robin Rombach and Patrick Esser.
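The card notes only the fork's provenance. For reference, the upstream base model it points to is typically loaded with diffusers as sketched below; whether this particular fork ships in the same diffusers layout (and can simply be substituted for the upstream repository id) is an assumption.

```python
import torch
from diffusers import StableDiffusionPipeline

# Upstream checkpoint the card says this fork is based on.
pipe = StableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1-base",
    torch_dtype=torch.float16,
)
pipe = pipe.to("cuda")

image = pipe("a laser-cut wooden ornament on a workbench", num_inference_steps=25).images[0]
image.save("sample.png")
```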
DeadBeast/roberta-base-pretrained-mr
[ "jax", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- tags: - ultralyticsplus - yolov8 - ultralytics - yolo - vision - object-detection - pytorch - awesome-yolov8-models library_name: ultralytics library_version: 8.0.21 inference: false datasets: - keremberke/table-extraction model-index: - name: keremberke/yolov8s-table-extraction results: - task: type: object-detection dataset: type: keremberke/table-extraction name: table-extraction split: validation metrics: - type: precision # since [email protected] is not available on hf.co/metrics value: 0.98376 # min: 0.0 - max: 1.0 name: [email protected](box) --- <div align="center"> <img width="640" alt="keremberke/yolov8s-table-extraction" src="https://huggingface.co/keremberke/yolov8s-table-extraction/resolve/main/thumbnail.jpg"> </div> ### Supported Labels ``` ['bordered', 'borderless'] ``` ### How to use - Install [ultralyticsplus](https://github.com/fcakyon/ultralyticsplus): ```bash pip install ultralyticsplus==0.0.23 ultralytics==8.0.21 ``` - Load model and perform prediction: ```python from ultralyticsplus import YOLO, render_result # load model model = YOLO('keremberke/yolov8s-table-extraction') # set model parameters model.overrides['conf'] = 0.25 # NMS confidence threshold model.overrides['iou'] = 0.45 # NMS IoU threshold model.overrides['agnostic_nms'] = False # NMS class-agnostic model.overrides['max_det'] = 1000 # maximum number of detections per image # set image image = 'https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg' # perform inference results = model.predict(image) # observe results print(results[0].boxes) render = render_result(model=model, image=image, result=results[0]) render.show() ``` **More models available at: [awesome-yolov8-models](https://yolov8.xyz)**
Dean/summarsiation
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - summarization - generated_from_trainer metrics: - rouge model-index: - name: mt5-small-finetuned-29jan-1 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mt5-small-finetuned-29jan-1 This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.4883 - Rouge1: 19.5044 - Rouge2: 6.2046 - Rougel: 19.3543 - Rougelsum: 19.381 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 10 - eval_batch_size: 10 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 30 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | |:-------------:|:-----:|:----:|:---------------:|:-------:|:------:|:-------:|:---------:| | 6.4829 | 1.0 | 217 | 2.7590 | 12.7914 | 3.3267 | 12.493 | 12.4137 | | 3.4814 | 2.0 | 434 | 2.7229 | 16.7805 | 4.8009 | 16.4908 | 16.5233 | | 3.2161 | 3.0 | 651 | 2.6422 | 18.3488 | 5.0629 | 18.1397 | 18.1976 | | 3.045 | 4.0 | 868 | 2.6008 | 18.1363 | 5.7597 | 17.9056 | 17.9882 | | 2.9475 | 5.0 | 1085 | 2.6061 | 18.9355 | 6.0803 | 18.6355 | 18.7673 | | 2.8547 | 6.0 | 1302 | 2.5628 | 17.904 | 5.8618 | 17.7818 | 17.8446 | | 2.7685 | 7.0 | 1519 | 2.5311 | 18.9128 | 5.9625 | 18.7142 | 18.842 | | 2.705 | 8.0 | 1736 | 2.5371 | 19.6663 | 6.0395 | 19.3416 | 19.408 | | 2.6438 | 9.0 | 1953 | 2.5427 | 19.1516 | 6.0007 | 18.9663 | 19.0156 | | 2.6086 | 10.0 | 2170 | 2.5211 | 19.0945 | 6.4325 | 18.918 | 18.9664 | | 2.5394 | 11.0 | 2387 | 2.5226 | 18.9019 | 6.3004 | 18.7281 | 18.8082 | | 2.5004 | 12.0 | 2604 | 2.5136 | 18.9701 | 6.1868 | 18.7234 | 18.8098 | | 2.4666 | 13.0 | 2821 | 2.4958 | 18.155 | 6.1513 | 18.0758 | 18.1362 | | 2.4255 | 14.0 | 3038 | 2.5101 | 18.7561 | 6.2634 | 18.6477 | 18.7123 | | 2.3856 | 15.0 | 3255 | 2.4860 | 19.2239 | 6.4539 | 19.1162 | 19.1403 | | 2.3594 | 16.0 | 3472 | 2.4905 | 19.0075 | 6.1541 | 18.9106 | 18.9616 | | 2.3301 | 17.0 | 3689 | 2.4970 | 18.7102 | 6.2065 | 18.4881 | 18.5588 | | 2.3032 | 18.0 | 3906 | 2.4744 | 19.3199 | 6.6458 | 19.1365 | 19.1733 | | 2.2825 | 19.0 | 4123 | 2.4907 | 18.9608 | 6.3074 | 18.8124 | 18.8502 | | 2.2609 | 20.0 | 4340 | 2.4772 | 19.2785 | 6.4725 | 19.0379 | 19.0556 | | 2.2384 | 21.0 | 4557 | 2.4874 | 18.9376 | 6.2922 | 18.7618 | 18.8442 | | 2.2176 | 22.0 | 4774 | 2.4853 | 18.9962 | 6.2231 | 18.7551 | 18.7958 | | 2.2095 | 23.0 | 4991 | 2.4960 | 18.6517 | 5.8114 | 18.4809 | 18.4811 | | 2.1958 | 24.0 | 5208 | 2.4911 | 18.9743 | 6.2245 | 18.7692 | 18.869 | | 2.1777 | 25.0 | 5425 | 2.4788 | 18.9623 | 6.0877 | 18.7591 | 18.7917 | | 2.1645 | 26.0 | 5642 | 2.4883 | 19.2814 | 6.2264 | 19.1407 | 19.1835 | | 2.1575 | 27.0 | 5859 | 2.4910 | 19.4592 | 6.3513 | 19.2842 | 19.3017 | | 2.142 | 28.0 | 6076 | 2.4815 | 19.3045 | 6.2179 | 19.1271 | 19.1084 | | 2.1396 | 29.0 | 6293 | 2.4858 | 19.4159 | 6.275 | 19.2582 | 19.2731 | | 2.1438 | 30.0 | 6510 | 2.4883 | 19.5044 | 6.2046 | 19.3543 | 19.381 | ### Framework versions - Transformers 4.26.0 - Pytorch 1.13.1+cu116 - Datasets 2.9.0 - Tokenizers 
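Since the card lists only ROUGE scores, a short usage sketch may be helpful. The repository path is an assumption taken from the run name, and the input text is a made-up example; mT5 checkpoints fine-tuned for summarization load through the seq2seq classes, which the summarization pipeline handles automatically.

```python
from transformers import pipeline

# Assumed repository path -- the card only gives the run name.
summarizer = pipeline("summarization", model="mt5-small-finetuned-29jan-1")

text = (
    "Hugging Face hosts thousands of fine-tuned checkpoints, each with a model card "
    "describing its training data, hyperparameters, and evaluation results."
)
print(summarizer(text, max_length=48, min_length=8)[0]["summary_text"])
```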
0.13.2
DecafNosebleed/DialoGPT-small-ScaraBot
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
15
null
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Huggy library_name: ml-agents --- # **ppo** Agent playing **Huggy** This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://singularite.itch.io/huggy 2. Step 1: Write your model_id: shivr/ppo-Huggy 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
Declan/Breitbart_model_v2
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: bert-fa-base-uncased-finetune_on_hoshfa results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-fa-base-uncased-finetune_on_hoshfa This model is a fine-tuned version of [HooshvareLab/bert-fa-base-uncased](https://huggingface.co/HooshvareLab/bert-fa-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.5274 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 6 - eval_batch_size: 6 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 2.3643 | 1.0 | 1604 | 2.1323 | | 1.5142 | 2.0 | 3208 | 2.1392 | | 0.8834 | 3.0 | 4812 | 2.5274 | ### Framework versions - Transformers 4.26.0 - Pytorch 1.13.1+cu116 - Datasets 2.9.0 - Tokenizers 0.13.2
Declan/Breitbart_model_v5
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- library_name: stable-baselines3 tags: - AntBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: AntBulletEnv-v0 type: AntBulletEnv-v0 metrics: - type: mean_reward value: 1359.90 +/- 57.04 name: mean_reward verified: false --- # **A2C** Agent playing **AntBulletEnv-v0** This is a trained model of a **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
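The usage section above is left as a TODO; one plausible completion with huggingface_sb3 is sketched below. The repo_id and filename are placeholders (the card does not say where the checkpoint is published), AntBulletEnv-v0 assumes pybullet_envs is installed, and the classic gym step/reset API of that era is used.

```python
import gym
import pybullet_envs  # registers AntBulletEnv-v0 with gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import A2C

# Placeholder repo/filename -- substitute the actual Hub location of this checkpoint.
checkpoint = load_from_hub(repo_id="user/a2c-AntBulletEnv-v0", filename="a2c-AntBulletEnv-v0.zip")
model = A2C.load(checkpoint)

env = gym.make("AntBulletEnv-v0")
obs = env.reset()
for _ in range(1000):
    action, _states = model.predict(obs, deterministic=True)
    obs, reward, done, info = env.step(action)
    if done:
        obs = env.reset()
```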
Declan/Breitbart_model_v6
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- tags: - ultralyticsplus - yolov8 - ultralytics - yolo - vision - object-detection - pytorch - awesome-yolov8-models library_name: ultralytics library_version: 8.0.21 inference: false datasets: - keremberke/table-extraction model-index: - name: keremberke/yolov8m-table-extraction results: - task: type: object-detection dataset: type: keremberke/table-extraction name: table-extraction split: validation metrics: - type: precision # since [email protected] is not available on hf.co/metrics value: 0.95194 # min: 0.0 - max: 1.0 name: [email protected](box) --- <div align="center"> <img width="640" alt="keremberke/yolov8m-table-extraction" src="https://huggingface.co/keremberke/yolov8m-table-extraction/resolve/main/thumbnail.jpg"> </div> ### Supported Labels ``` ['bordered', 'borderless'] ``` ### How to use - Install [ultralyticsplus](https://github.com/fcakyon/ultralyticsplus): ```bash pip install ultralyticsplus==0.0.23 ultralytics==8.0.21 ``` - Load model and perform prediction: ```python from ultralyticsplus import YOLO, render_result # load model model = YOLO('keremberke/yolov8m-table-extraction') # set model parameters model.overrides['conf'] = 0.25 # NMS confidence threshold model.overrides['iou'] = 0.45 # NMS IoU threshold model.overrides['agnostic_nms'] = False # NMS class-agnostic model.overrides['max_det'] = 1000 # maximum number of detections per image # set image image = 'https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg' # perform inference results = model.predict(image) # observe results print(results[0].boxes) render = render_result(model=model, image=image, result=results[0]) render.show() ``` **More models available at: [awesome-yolov8-models](https://yolov8.xyz)**
Declan/CNN_model_v2
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
2023-01-29T05:14:49Z
--- tags: - Pixelcopter-PLE-v0 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-Pixelcopter-PLE-v0 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Pixelcopter-PLE-v0 type: Pixelcopter-PLE-v0 metrics: - type: mean_reward value: 30.90 +/- 21.54 name: mean_reward verified: false --- # **Reinforce** Agent playing **Pixelcopter-PLE-v0** This is a trained model of a **Reinforce** agent playing **Pixelcopter-PLE-v0** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
Declan/CNN_model_v3
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: creativeml-openrail-m base_model: stabilityai/stable-diffusion-2-1-base instance_prompt: CharTurnerHN tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers - lora inference: true --- # LoRA DreamBooth - charturnerhn These are LoRA adaptation weights for [stabilityai/stable-diffusion-2-1-base](https://huggingface.co/stabilityai/stable-diffusion-2-1-base). The weights were trained on the instance prompt "CharTurnerHN" using [DreamBooth](https://dreambooth.github.io/). You can find some example images below. Test prompt: A character turnaround of a paperboy in a blue hat. ![image_0](test_images/image_0.png) ![image_1](test_images/image_1.png) ![image_2](test_images/image_2.png) ![image_3](test_images/image_3.png)
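The card shows example images but no loading snippet. A rough sketch with diffusers follows; the repository path "charturnerhn" is taken from the card's title and may need to be replaced with the full user/repo id or a local weights folder, and `load_attn_procs` is assumed to be the entry point for these DreamBooth-LoRA attention weights.

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1-base",
    torch_dtype=torch.float16,
).to("cuda")

# Load the LoRA attention weights on top of the base UNet.
# "charturnerhn" is an assumed path; use the full Hub id or a local folder with the trained weights.
pipe.unet.load_attn_procs("charturnerhn")

prompt = "CharTurnerHN, a character turnaround of a paperboy in a blue hat"
image = pipe(prompt, num_inference_steps=30).images[0]
image.save("charturner_sample.png")
```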
Declan/ChicagoTribune_model_v4
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Huggy library_name: ml-agents --- # **ppo** Agent playing **Huggy** This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy 2. Step 1: Write your model_id: ThuyVuPhuong/ppo-Huggy 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
Declan/ChicagoTribune_model_v8
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- language: - en license: apache-2.0 tags: - generated_from_trainer datasets: - glue metrics: - accuracy model-index: - name: mobilebert_add_GLUE_Experiment_logit_kd_rte_128 results: - task: name: Text Classification type: text-classification dataset: name: GLUE RTE type: glue config: rte split: validation args: rte metrics: - name: Accuracy type: accuracy value: 0.5270758122743683 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mobilebert_add_GLUE_Experiment_logit_kd_rte_128 This model is a fine-tuned version of [google/mobilebert-uncased](https://huggingface.co/google/mobilebert-uncased) on the GLUE RTE dataset. It achieves the following results on the evaluation set: - Loss: 0.3914 - Accuracy: 0.5271 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 128 - eval_batch_size: 128 - seed: 10 - distributed_type: multi-GPU - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.4093 | 1.0 | 20 | 0.3914 | 0.5271 | | 0.4076 | 2.0 | 40 | 0.3922 | 0.5271 | | 0.4076 | 3.0 | 60 | 0.3917 | 0.5271 | | 0.4075 | 4.0 | 80 | 0.3920 | 0.5271 | | 0.4075 | 5.0 | 100 | 0.3925 | 0.5271 | | 0.4074 | 6.0 | 120 | 0.3915 | 0.5271 | ### Framework versions - Transformers 4.26.0 - Pytorch 1.14.0a0+410ce96 - Datasets 2.9.0 - Tokenizers 0.13.2
Declan/FoxNews_model_v2
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-Taxi-v3 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.50 +/- 2.73 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3** . ## Usage ```python model = load_from_hub(repo_id="BlackNoodle/q-Taxi-v3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]) ```
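The snippet above relies on helpers (`load_from_hub`, `evaluate_agent`) that are defined in the course notebooks rather than in a pip package. A minimal stand-in for `load_from_hub` might look like the sketch below, assuming the repository stores the pickled model dict under the filename shown in the card.

```python
import pickle
from huggingface_hub import hf_hub_download

def load_from_hub(repo_id: str, filename: str) -> dict:
    """Download and unpickle a Q-learning model dict from the Hub."""
    local_path = hf_hub_download(repo_id=repo_id, filename=filename)
    with open(local_path, "rb") as f:
        return pickle.load(f)

model = load_from_hub(repo_id="BlackNoodle/q-Taxi-v3", filename="q-learning.pkl")
print(model["env_id"], model["n_eval_episodes"])  # keys as used in the card's snippet
```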