Dataset columns:

| Column | Type | Values |
|---|---|---|
| modelId | string | lengths 4-81 |
| tags | sequence | |
| pipeline_tag | string | 17 distinct values |
| config | dict | |
| downloads | int64 | 0-59.7M |
| first_commit | timestamp[ns, tz=UTC] | |
| card | string | lengths 51-438k |
ConstellationBoi/Oop
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 261.06 +/- 28.61 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
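The PPO / LunarLander-v2 cards in this dump (such as the one above) leave the Stable-Baselines3 usage section as a TODO. Purely as an illustration, and not the card authors' code, a minimal sketch of loading such a checkpoint with the `huggingface_sb3` helper might look like the following; the repo id and filename are placeholders, not values taken from the card.

```python
# Illustrative sketch only -- repo_id and filename below are hypothetical.
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

# Download the zipped checkpoint from the Hub (names are assumptions).
checkpoint = load_from_hub(
    repo_id="username/ppo-LunarLander-v2",   # hypothetical repo id
    filename="ppo-LunarLander-v2.zip",       # hypothetical filename
)
model = PPO.load(checkpoint)

# Evaluate the agent for a few episodes.
env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```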
Contrastive-Tension/BERT-Base-CT-STSb
[ "pytorch", "tf", "jax", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- tags: - generated_from_trainer metrics: - f1 model-index: - name: electricidad-base-finetuned-parmex results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # electricidad-base-finetuned-parmex This model is a fine-tuned version of [mrm8488/electricidad-base-discriminator](https://huggingface.co/mrm8488/electricidad-base-discriminator) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.0372 - F1: 0.9764 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 8.309269976237555e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 4 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | No log | 1.0 | 208 | 0.0377 | 0.9801 | | No log | 2.0 | 416 | 0.0372 | 0.9764 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0+cu113 - Datasets 2.1.0 - Tokenizers 0.12.1
Contrastive-Tension/BERT-Base-CT
[ "pytorch", "tf", "jax", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
16
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - amazon_reviews_multi metrics: - accuracy model-index: - name: roberta-base-bne-finetuned-amazon_reviews_multi results: - task: name: Text Classification type: text-classification dataset: name: amazon_reviews_multi type: amazon_reviews_multi args: es metrics: - name: Accuracy type: accuracy value: 0.93425 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base-bne-finetuned-amazon_reviews_multi This model is a fine-tuned version of [BSC-TeMU/roberta-base-bne](https://huggingface.co/BSC-TeMU/roberta-base-bne) on the amazon_reviews_multi dataset. It achieves the following results on the evaluation set: - Loss: 0.2291 - Accuracy: 0.9343 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.1909 | 1.0 | 1250 | 0.1784 | 0.9295 | | 0.1013 | 2.0 | 2500 | 0.2291 | 0.9343 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0+cu113 - Datasets 2.1.0 - Tokenizers 0.12.1
Contrastive-Tension/BERT-Distil-CT-STSb
[ "pytorch", "tf", "distilbert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "DistilBertModel" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- license: mit tags: - generated_from_trainer datasets: - xtreme metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-de results: - task: name: Token Classification type: token-classification dataset: name: xtreme type: xtreme args: PAN-X.de metrics: - name: F1 type: f1 value: 0.8620945214069894 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-de This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset. It achieves the following results on the evaluation set: - Loss: 0.1372 - F1: 0.8621 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.2575 | 1.0 | 525 | 0.1621 | 0.8292 | | 0.1287 | 2.0 | 1050 | 0.1378 | 0.8526 | | 0.0831 | 3.0 | 1575 | 0.1372 | 0.8621 | ### Framework versions - Transformers 4.11.3 - Pytorch 1.11.0+cu113 - Datasets 1.16.1 - Tokenizers 0.10.3
Contrastive-Tension/BERT-Distil-CT
[ "pytorch", "tf", "distilbert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "DistilBertForMaskedLM" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- language: - de tags: - Text Classification - sentiment - Simpletransformers - deepset/gbert-base --- This gBert-base model was finetuned on a sentiment prediction task with tweets from German politician during the German Federal Election in 2021. ## Model Description: This model was trained on ~30.000 annotated tweets in German language on its sentiment. It can predict tweets as negative, positive or neutral. It achieved an accuracy of 93% on the specific dataset. ## Model Implementation You can implement this model for example with Simpletransformers. First you have to unpack the file. def unpack_model(model_name=''): tar = tarfile.open(f"{model_name}.tar.gz", "r:gz") tar.extractall() tar.close() The hyperparameter were defined as follows: train_args ={"reprocess_input_data": True, "fp16":False, "num_train_epochs": 4, "overwrite_output_dir":True, "train_batch_size": 32, "eval_batch_size": 32} Now create the model: unpack_model(YOUR_DOWNLOADED_FILE_HERE) model = ClassificationModel( "bert", "content/outputs/", num_labels= 3, args=train_args ) In this case for the output: - 0 = positive - 1 = negative - 2 = neutral Example for a positive prediction: model.predict(["Das ist gut! Wir danken dir."]) ([0], array([[ 2.06561327, -3.57908797, 1.5340755 ]])) Example for a negative prediction: model.predict(["Ich hasse dich!"]) ([1], array([[-3.50486898, 4.29590368, -0.9000684 ]])) Example for a neutral prediction: model.predict(["Heute ist Sonntag."]) ([2], array([[-2.94458342, -2.91875601, 4.94414234]])) This model was created by Maximilian Weissenbacher for a project at the University of Regensburg.
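The German sentiment card above pastes its Simpletransformers snippets without imports or code fences. A consolidated, hedged version of the same steps is sketched below; the `tarfile` and `simpletransformers` imports and the archive name are assumptions added on top of what the card shows.

```python
# Sketch assembled from the snippets in the card above; the archive name is a placeholder.
import tarfile
from simpletransformers.classification import ClassificationModel

def unpack_model(model_name=""):
    # Extract the downloaded model archive ("<model_name>.tar.gz").
    with tarfile.open(f"{model_name}.tar.gz", "r:gz") as tar:
        tar.extractall()

train_args = {
    "reprocess_input_data": True,
    "fp16": False,
    "num_train_epochs": 4,
    "overwrite_output_dir": True,
    "train_batch_size": 32,
    "eval_batch_size": 32,
}

unpack_model("YOUR_DOWNLOADED_FILE_HERE")  # placeholder archive name
model = ClassificationModel(
    "bert", "content/outputs/", num_labels=3, args=train_args
)

# Labels per the card: 0 = positive, 1 = negative, 2 = neutral.
predictions, raw_outputs = model.predict(["Das ist gut! Wir danken dir."])
print(predictions)  # e.g. [0] for a positive tweet
```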
Contrastive-Tension/BERT-Distil-NLI-CT
[ "pytorch", "tf", "distilbert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "DistilBertForMaskedLM" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- language: ja license: cc-by-sa-4.0 datasets: - wikipedia - cc100 mask_token: "[MASK]" widget: - text: "早稲田 大学 で 自然 言語 処理 を [MASK] する 。" --- # nlp-waseda/roberta-large-japanese ## Model description This is a Japanese RoBERTa large model pretrained on Japanese Wikipedia and the Japanese portion of CC-100. ## How to use You can use this model for masked language modeling as follows: ```python from transformers import AutoTokenizer, AutoModelForMaskedLM tokenizer = AutoTokenizer.from_pretrained("nlp-waseda/roberta-large-japanese") model = AutoModelForMaskedLM.from_pretrained("nlp-waseda/roberta-large-japanese") sentence = '早稲田 大学 で 自然 言語 処理 を [MASK] する 。' # input should be segmented into words by Juman++ in advance encoding = tokenizer(sentence, return_tensors='pt') ... ``` You can fine-tune this model on downstream tasks. ## Tokenization The input text should be segmented into words by [Juman++](https://github.com/ku-nlp/jumanpp) in advance. Juman++ 2.0.0-rc3 was used for pretraining. Each word is tokenized into tokens by [sentencepiece](https://github.com/google/sentencepiece). `BertJapaneseTokenizer` now supports automatic `JumanppTokenizer` and `SentencepieceTokenizer`. You can use [this model](https://huggingface.co/nlp-waseda/roberta-large-japanese-with-auto-jumanpp) without any data preprocessing. ## Vocabulary The vocabulary consists of 32000 tokens including words ([JumanDIC](https://github.com/ku-nlp/JumanDIC)) and subwords induced by the unigram language model of [sentencepiece](https://github.com/google/sentencepiece). ## Training procedure This model was trained on Japanese Wikipedia (as of 20210920) and the Japanese portion of CC-100. It took two weeks using eight NVIDIA A100 GPUs. The following hyperparameters were used during pretraining: - learning_rate: 6e-5 - per_device_train_batch_size: 103 - distributed_type: multi-GPU - num_devices: 8 - gradient_accumulation_steps: 5 - total_train_batch_size: 4120 - max_seq_length: 128 - optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-6 - lr_scheduler_type: linear - training_steps: 670000 - warmup_steps: 10000 - mixed_precision_training: Native AMP ## Performance on JGLUE See the [Baseline Scores](https://github.com/yahoojapan/JGLUE#baseline-scores) of JGLUE.
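The masked-language-modeling snippet in the card above stops at the tokenizer call. As a hedged continuation (the top-k decoding step is our assumption, not part of the card), one could score the `[MASK]` position like this:

```python
# Continuation of the card's example; the decoding step below is an assumption.
import torch
from transformers import AutoTokenizer, AutoModelForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("nlp-waseda/roberta-large-japanese")
model = AutoModelForMaskedLM.from_pretrained("nlp-waseda/roberta-large-japanese")

# Input must already be segmented into words by Juman++, as the card notes.
sentence = '早稲田 大学 で 自然 言語 処理 を [MASK] する 。'
encoding = tokenizer(sentence, return_tensors='pt')

with torch.no_grad():
    logits = model(**encoding).logits

# Take the top-5 candidate tokens for the [MASK] position.
mask_positions = (encoding.input_ids == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
top_ids = logits[0, mask_positions[0]].topk(5).indices
print(tokenizer.convert_ids_to_tokens(top_ids.tolist()))
```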
Contrastive-Tension/BERT-Large-CT-STSb
[ "pytorch", "tf", "jax", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 208.07 +/- 67.39 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Contrastive-Tension/RoBerta-Large-CT-STSb
[ "pytorch", "tf", "jax", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- datasets: SberDevices/Golos --- # **Acoustic and language models** Acoustic model built using [QuartzNet15x5](https://arxiv.org/pdf/1910.10261.pdf) architecture and trained using [NeMo toolkit](https://github.com/NVIDIA/NeMo/tree/r1.0.0b4) Three n-gram language models created using [KenLM Language Model Toolkit](https://kheafield.com/code/kenlm) * LM built on [Common Crawl](https://commoncrawl.org) Russian dataset * LM built on [Golos](https://huggingface.co/datasets/SberDevices/Golos) train set * LM built on [Common Crawl](https://commoncrawl.org) and [Golos](https://huggingface.co/datasets/SberDevices/Golos) datasets together (50/50) | Archives | Size | Links | |--------------------------|------------|-----------------| | QuartzNet15x5_golos.nemo | 68 MB | https://sc.link/ZMv | | KenLMs.tar | 4.8 GB | https://sc.link/YL0 | Golos data and models are also available in the hub of pre-trained models, datasets, and containers - DataHub ML Space. You can train the model and deploy it on the high-performance SberCloud infrastructure in [ML Space](https://sbercloud.ru/ru/aicloud/mlspace) - full-cycle machine learning development platform for DS-teams collaboration based on the Christofari Supercomputer. ## **Evaluation** Percents of Word Error Rate for different test sets | Decoder \ Test set | Crowd test | Farfield test | MCV<sup>1</sup> dev | MCV<sup>1</sup> test | |-------------------------------------|-----------|----------|-----------|----------| | Greedy decoder | 4.389 % | 14.949 % | 9.314 % | 11.278 % | | Beam Search with Common Crawl LM | 4.709 % | 12.503 % | 6.341 % | 7.976 % | | Beam Search with Golos train set LM | 3.548 % | 12.384 % | - | - | | Beam Search with Common Crawl and Golos LM | 3.318 % | 11.488 % | 6.4 % | 8.06 % | <sup>1</sup> [Common Voice](https://commonvoice.mozilla.org) - Mozilla's initiative to help teach machines how real people speak. ## **Resources** [[arxiv.org] Golos: Russian Dataset for Speech Research](https://arxiv.org/abs/2106.10161) [[habr.com] Golos — самый большой русскоязычный речевой датасет, размеченный вручную, теперь в открытом доступе](https://habr.com/ru/company/sberdevices/blog/559496/) [[habr.com] Как улучшить распознавание русской речи до 3% WER с помощью открытых данных](https://habr.com/ru/company/sberdevices/blog/569082/)
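The Golos card above links a downloadable `QuartzNet15x5_golos.nemo` checkpoint but shows no loading code. As a rough sketch only (assuming the NeMo toolkit referenced in the card, a locally downloaded checkpoint, and a placeholder audio path), greedy transcription might look like:

```python
# Hedged sketch: assumes NeMo is installed and the .nemo file from the card's
# table has been downloaded locally; the audio path is a placeholder.
import nemo.collections.asr as nemo_asr

# Restore the acoustic model from the downloaded checkpoint.
asr_model = nemo_asr.models.EncDecCTCModel.restore_from("QuartzNet15x5_golos.nemo")

# Greedy-decoder transcription of a 16 kHz mono WAV file (placeholder path).
transcripts = asr_model.transcribe(["example_ru.wav"])
print(transcripts[0])
```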
CouchCat/ma_ner_v7_distil
[ "pytorch", "distilbert", "token-classification", "en", "transformers", "ner", "license:mit", "autotrain_compatible" ]
token-classification
{ "architectures": [ "DistilBertForTokenClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 186.57 +/- 75.05 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
CouchCat/ma_sa_v7_distil
[ "pytorch", "distilbert", "text-classification", "en", "transformers", "sentiment-analysis", "license:mit" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
38
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 243.43 +/- 22.55 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Cyrell/Cyrell
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: en thumbnail: http://www.huggingtweets.com/marcfriedrich7/1652179164370/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1418445526375223297/XdAgs-rW_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">marc friedrich</div> <div style="text-align: center; font-size: 14px;">@marcfriedrich7</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from marc friedrich. | Data | marc friedrich | | --- | --- | | Tweets downloaded | 3249 | | Retweets | 705 | | Short tweets | 672 | | Tweets kept | 1872 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/2p2smtko/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @marcfriedrich7's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/2ly8l45f) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/2ly8l45f/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/marcfriedrich7') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
D3vil/DialoGPT-smaall-harrypottery
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - en library_name: k2 tags: - automatic-speech-recognition - k2 widget: - example_title: Librispeech sample 1 src: https://huggingface.co/csukuangfj/icefall-asr-librispeech-pruned-transducer-stateless3-2022-05-13/resolve/main/test_wavs/1089-134686-0001.wav - example_title: Librispeech sample 2 src: https://huggingface.co/csukuangfj/icefall-asr-librispeech-pruned-transducer-stateless3-2022-05-13/resolve/main/test_wavs/1221-135766-0001.wav - example_title: Librispeech sample 3 src: https://huggingface.co/csukuangfj/icefall-asr-librispeech-pruned-transducer-stateless3-2022-05-13/resolve/main/test_wavs/1221-135766-0002.wav --- # Introduction See https://github.com/k2-fsa/icefall/pull/363
D3xter1922/distilbert-base-uncased-finetuned-cola
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: wav2vec2-base-timit-demo-google-colab results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-base-timit-demo-google-colab This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.5185 - Wer: 0.3370 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1000 - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:-----:|:---------------:|:------:| | 3.5137 | 1.0 | 500 | 1.6719 | 0.9580 | | 0.8324 | 2.01 | 1000 | 0.5546 | 0.5341 | | 0.4365 | 3.01 | 1500 | 0.4567 | 0.4635 | | 0.3058 | 4.02 | 2000 | 0.4429 | 0.4454 | | 0.2284 | 5.02 | 2500 | 0.4734 | 0.4186 | | 0.1892 | 6.02 | 3000 | 0.4191 | 0.4030 | | 0.1542 | 7.03 | 3500 | 0.4522 | 0.3985 | | 0.1364 | 8.03 | 4000 | 0.4749 | 0.3922 | | 0.1239 | 9.04 | 4500 | 0.4950 | 0.3977 | | 0.1092 | 10.04 | 5000 | 0.4468 | 0.3779 | | 0.0956 | 11.04 | 5500 | 0.4897 | 0.3789 | | 0.0897 | 12.05 | 6000 | 0.4927 | 0.3718 | | 0.0792 | 13.05 | 6500 | 0.5242 | 0.3699 | | 0.0731 | 14.06 | 7000 | 0.5202 | 0.3772 | | 0.0681 | 15.06 | 7500 | 0.5046 | 0.3637 | | 0.062 | 16.06 | 8000 | 0.5336 | 0.3664 | | 0.0556 | 17.07 | 8500 | 0.5017 | 0.3633 | | 0.0556 | 18.07 | 9000 | 0.5466 | 0.3736 | | 0.0461 | 19.08 | 9500 | 0.5489 | 0.3566 | | 0.0439 | 20.08 | 10000 | 0.5399 | 0.3559 | | 0.0397 | 21.08 | 10500 | 0.5154 | 0.3539 | | 0.0346 | 22.09 | 11000 | 0.5170 | 0.3513 | | 0.0338 | 23.09 | 11500 | 0.5236 | 0.3492 | | 0.0342 | 24.1 | 12000 | 0.5288 | 0.3493 | | 0.0282 | 25.1 | 12500 | 0.5147 | 0.3449 | | 0.0251 | 26.1 | 13000 | 0.5092 | 0.3442 | | 0.0268 | 27.11 | 13500 | 0.5093 | 0.3413 | | 0.021 | 28.11 | 14000 | 0.5310 | 0.3399 | | 0.022 | 29.12 | 14500 | 0.5185 | 0.3370 | ### Framework versions - Transformers 4.17.0 - Pytorch 1.11.0+cu113 - Datasets 1.18.3 - Tokenizers 0.12.1
D3xter1922/electra-base-discriminator-finetuned-mnli
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - generated_from_keras_callback model-index: - name: madatnlp/ket5-config-scratch results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # madatnlp/ket5-config-scratch This model is a fine-tuned version of [madatnlp/ket5-config-scratch](https://huggingface.co/madatnlp/ket5-config-scratch) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 1.3667 - Validation Loss: 1.4931 - Epoch: 18 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'learning_rate': 1e-05, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Epoch | |:----------:|:---------------:|:-----:| | 1.4117 | 1.5494 | 0 | | 1.4012 | 1.5838 | 1 | | 1.4056 | 1.5710 | 2 | | 1.4133 | 1.5190 | 3 | | 1.3887 | 1.4858 | 4 | | 1.3951 | 1.4920 | 5 | | 1.3889 | 1.5038 | 6 | | 1.3804 | 1.5079 | 7 | | 1.3991 | 1.4368 | 8 | | 1.3852 | 1.4974 | 9 | | 1.3971 | 1.5164 | 10 | | 1.3853 | 1.5632 | 11 | | 1.3719 | 1.5163 | 12 | | 1.3805 | 1.4271 | 13 | | 1.3818 | 1.5078 | 14 | | 1.3741 | 1.4687 | 15 | | 1.3796 | 1.4623 | 16 | | 1.3701 | 1.5326 | 17 | | 1.3667 | 1.4931 | 18 | ### Framework versions - Transformers 4.18.0 - TensorFlow 2.8.0 - Datasets 2.1.0 - Tokenizers 0.12.1
D4RL1NG/yes
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 254.09 +/- 18.39 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
DCU-NLP/electra-base-irish-cased-generator-v1
[ "pytorch", "electra", "fill-mask", "ga", "transformers", "irish", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "ElectraForMaskedLM" ], "model_type": "electra", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- language: en thumbnail: http://www.huggingtweets.com/broductmanager/1652182609331/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1522425562895044608/H93gVhPH_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">rahul</div> <div style="text-align: center; font-size: 14px;">@broductmanager</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from rahul. | Data | rahul | | --- | --- | | Tweets downloaded | 3244 | | Retweets | 85 | | Short tweets | 1164 | | Tweets kept | 1995 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/1r967jne/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @broductmanager's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/2zx676ih) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/2zx676ih/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/broductmanager') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
DJSammy/bert-base-danish-uncased_BotXO-ai
[ "pytorch", "jax", "da", "dataset:common_crawl", "dataset:wikipedia", "transformers", "bert", "masked-lm", "license:cc-by-4.0", "fill-mask" ]
fill-mask
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - conll2003 metrics: - precision - recall - f1 - accuracy model-index: - name: distilbert-base-uncased-finetuned-ner results: - task: name: Token Classification type: token-classification dataset: name: conll2003 type: conll2003 args: conll2003 metrics: - name: Precision type: precision value: 0.924327912379688 - name: Recall type: recall value: 0.9346683074169371 - name: F1 type: f1 value: 0.9294693514295249 - name: Accuracy type: accuracy value: 0.9836529143565221 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-ner This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the conll2003 dataset. It achieves the following results on the evaluation set: - Loss: 0.0615 - Precision: 0.9243 - Recall: 0.9347 - F1: 0.9295 - Accuracy: 0.9837 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.2396 | 1.0 | 878 | 0.0715 | 0.9135 | 0.9228 | 0.9181 | 0.9805 | | 0.051 | 2.0 | 1756 | 0.0617 | 0.9192 | 0.9334 | 0.9263 | 0.9826 | | 0.0295 | 3.0 | 2634 | 0.0615 | 0.9243 | 0.9347 | 0.9295 | 0.9837 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0+cu113 - Datasets 2.1.0 - Tokenizers 0.12.1
DJStomp/TestingSalvoNET
[ "transformers" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- license: mit tags: - generated_from_keras_callback model-index: - name: dummy-model results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # dummy-model This model is a fine-tuned version of [camembert-base](https://huggingface.co/camembert-base) on an unknown dataset. It achieves the following results on the evaluation set: ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: None - training_precision: float32 ### Training results ### Framework versions - Transformers 4.18.0 - TensorFlow 2.8.0 - Tokenizers 0.12.1
DKpro000/DialoGPT-medium-harrypotter
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: en thumbnail: http://www.huggingtweets.com/_avichalp_/1652183801632/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1472922431396331520/eqT17_QF_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">avi</div> <div style="text-align: center; font-size: 14px;">@_avichalp_</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from avi. | Data | avi | | --- | --- | | Tweets downloaded | 2625 | | Retweets | 259 | | Short tweets | 596 | | Tweets kept | 1770 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/2wg7ysai/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @_avichalp_'s tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/3ae6t1qq) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/3ae6t1qq/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/_avichalp_') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
DKpro000/DialoGPT-small-harrypotter
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-05-10T11:59:05Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 237.90 +/- 21.51 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
DSI/TweetBasedSA
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
null
--- language: en license: mit --- # MultiCite: Multi-label Citation Intent Analysis as paper-level Q&A (NAACL 2022) This model has been trained on the data available here: https://github.com/allenai/multicite.
Daivakai/DialoGPT-small-saitama
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 256.23 +/- 14.87 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Danbi/distilgpt2-finetuned-wikitext2
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - text2text-generation - generated_from_trainer metrics: - rouge - bleu datasets: - domenicrosati/QA2D model-index: - name: QA2D-t5-small results: - task: name: Question to Declarative Sentence type: text2text-generation dataset: name: domenicrosati/QA2D type: domenicrosati/QA2D args: plain_text metrics: - name: Rouge1 type: rouge value: 89.8753 - name: Rouge2 type: rouge value: 81.8104 - name: Rougel type: rouge value: 85.4253 - name: Rougelsum type: rouge value: 85.4236 - name: Bleu type: bleu value: 72.1080 widget: - text: "where in the world is carmen sandiego. she is in abruzzo" example_title: "Where is Carmen Sandiego?" - text: "which province is halifax in. nova scotia" example_title: "A Halifact" --- # QA2D-t5-small This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on [QA2D](https://huggingface.co/datasets/domenicrosati/QA2D). It achieves the following results on the evaluation set: - Loss: 0.3236 - Rouge1: 89.8753 - Rouge2: 81.8104 - Rougel: 85.4253 - Rougelsum: 85.4236 - Bleu: 72.1080 See: [https://wandb.ai/domenicrosati/huggingface/runs/n1yallpe](https://wandb.ai/domenicrosati/huggingface/runs/n1yallpe) for training and eval stats and [https://github.com/domenicrosati/qa2d-models](https://github.com/domenicrosati/qa2d-models) for the code! ## Model description A t5-model model to convert questions, answer pairs into statements. Due to the way it's been trained the input should be all lower case and punctuation removed. Use with `. ` as the seperator between question and answer. > "where in the world is carmen. abruzzo" > Output: "carmen is in abruzzo" Thought punctation and upper case works. ``` from transformers import AutoTokenizer, AutoModelForSeq2SeqLM tokenizer = AutoTokenizer.from_pretrained('domenicrosati/QA2D-t5-small') model = AutoModelForSeq2SeqLM.from_pretrained('domenicrosati/QA2D-t5-small') question = "where in the world is carmen sandiego" answer = "she is in abruzzo" SEP = ". " prompt = f'{question}{SEP}{answer}' input_ids = tokenizer(prompt, return_tensors='pt').input_ids output_ids = model.generate(input_ids) responses = tokenizer.batch_decode(output_ids, skip_special_tokens=True) # ['carmen sandiego is in abruzzo'] ``` ## Intended uses & limitations To convert questions, answer pairs into statements. ## Training and evaluation data Uses [QA2D](https://huggingface.co/datasets/domenicrosati/QA2D). 
See [https://github.com/domenicrosati/qa2d-models](https://github.com/domenicrosati/qa2d-models) ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5.6e-05 - train_batch_size: 12 - eval_batch_size: 12 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Bleu | |:-------------:|:-----:|:------:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:| | 0.3177 | 1.0 | 5060 | 0.3144 | 89.6379 | 81.3168 | 85.2036 | 85.1904 | 71.4255 | | 0.2479 | 2.0 | 10120 | 0.3035 | 89.7816 | 81.6556 | 85.3541 | 85.3406 | 71.7248 | | 0.2268 | 3.0 | 15180 | 0.3015 | 89.8287 | 81.698 | 85.3434 | 85.3387 | 71.8344 | | 0.2111 | 4.0 | 20240 | 0.3014 | 89.8082 | 81.7192 | 85.4094 | 85.406 | 71.9172 | | 0.1991 | 5.0 | 25300 | 0.3023 | 89.8776 | 81.7607 | 85.3912 | 85.3842 | 71.9417 | | 0.1886 | 6.0 | 30360 | 0.3012 | 89.901 | 81.7614 | 85.3345 | 85.3315 | 72.0218 | | 0.1803 | 7.0 | 35420 | 0.3010 | 89.8776 | 81.8189 | 85.4154 | 85.4097 | 72.0533 | | 0.1724 | 8.0 | 40480 | 0.3041 | 89.9168 | 81.8663 | 85.4457 | 85.4447 | 72.1470 | | 0.1654 | 9.0 | 45540 | 0.3076 | 89.8901 | 81.8536 | 85.4857 | 85.4863 | 72.0830 | | 0.1601 | 10.0 | 50600 | 0.3083 | 89.9186 | 81.881 | 85.4653 | 85.4594 | 72.1048 | | 0.1546 | 11.0 | 55660 | 0.3136 | 89.8958 | 81.8533 | 85.4217 | 85.4238 | 72.0752 | | 0.1502 | 12.0 | 60720 | 0.3138 | 89.903 | 81.8604 | 85.4301 | 85.4267 | 72.1373 | | 0.1461 | 13.0 | 65780 | 0.3140 | 89.8867 | 81.7945 | 85.3698 | 85.3662 | 72.0718 | | 0.1423 | 14.0 | 70840 | 0.3171 | 89.8985 | 81.8221 | 85.4348 | 85.4331 | 72.1168 | | 0.1392 | 15.0 | 75900 | 0.3186 | 89.8938 | 81.8246 | 85.402 | 85.3991 | 72.0858 | | 0.1366 | 16.0 | 80960 | 0.3208 | 89.859 | 81.8133 | 85.4194 | 85.4182 | 72.1014 | | 0.1344 | 17.0 | 86020 | 0.3222 | 89.8909 | 81.828 | 85.4392 | 85.435 | 72.1380 | | 0.1324 | 18.0 | 91080 | 0.3226 | 89.8906 | 81.8351 | 85.4506 | 85.4441 | 72.1622 | | 0.1309 | 19.0 | 96140 | 0.3231 | 89.8925 | 81.8369 | 85.4375 | 85.4366 | 72.1552 | | 0.1305 | 20.0 | 101200 | 0.3236 | 89.8753 | 81.8104 | 85.4253 | 85.4236 | 72.1080 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0+cu113 - Datasets 2.1.0 - Tokenizers 0.12.1
Dandara/bertimbau-socioambiental
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity --- # florentgbelidji/setfit_emotion This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('florentgbelidji/setfit_emotion') embeddings = model.encode(sentences) print(embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=florentgbelidji/setfit_emotion) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 203 with parameters: ``` {'batch_size': 16, 'sampler': 'torch.utils.data.dataloader._InfiniteConstantSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.BatchHardTripletLoss.BatchHardTripletLoss` Parameters of the fit()-Method: ``` { "epochs": 1, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'transformers.optimization.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": 4060, "warmup_steps": 406, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 384, 'do_lower_case': False}) with Transformer model: MPNetModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) (2): Normalize() ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
DannyMichael/ECU911
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - generated_from_trainer model-index: - name: con_gpt_med_model results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # con_gpt_med_model This model is a fine-tuned version of [gpt2-medium](https://huggingface.co/gpt2-medium) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 2 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 8 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2.0 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.17.0 - Pytorch 1.11.0+cu113 - Datasets 2.0.0 - Tokenizers 0.11.6
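The card above gives no usage example; a minimal text-generation sketch could look like the following. The checkpoint path and the prompt are assumptions — the card does not say where the model is published or what domain it was tuned on.

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

# Assumed checkpoint path -- replace with the actual repo id or local directory.
checkpoint = "con_gpt_med_model"

tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint)

prompt = "The patient was admitted with"  # illustrative prompt only
input_ids = tokenizer(prompt, return_tensors="pt").input_ids
output_ids = model.generate(input_ids, max_new_tokens=40, do_sample=True, top_p=0.95)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```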
DarkestSky/distilbert-base-uncased-finetuned-ner
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: vanichandna/bert-base-multilingual-cased-finetuned-squad results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # vanichandna/bert-base-multilingual-cased-finetuned-squad This model is a fine-tuned version of [bert-base-multilingual-cased](https://huggingface.co/bert-base-multilingual-cased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.5313 - Epoch: 3 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 1e-05, 'decay_steps': 43880, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: mixed_float16 ### Training results | Train Loss | Epoch | |:----------:|:-----:| | 1.2336 | 0 | | 0.8301 | 1 | | 0.6456 | 2 | | 0.5313 | 3 | ### Framework versions - Transformers 4.18.0 - TensorFlow 2.8.0 - Datasets 2.1.0 - Tokenizers 0.12.1
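The card above omits a usage section; since training used Keras/TensorFlow, a minimal extractive-QA sketch with the TF classes might look like this. The repo id is taken from the card title, and the question/context pair is purely illustrative.

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForQuestionAnswering

ckpt = "vanichandna/bert-base-multilingual-cased-finetuned-squad"
tokenizer = AutoTokenizer.from_pretrained(ckpt)
model = TFAutoModelForQuestionAnswering.from_pretrained(ckpt)

question = "Where is the Eiffel Tower located?"
context = "The Eiffel Tower is an iron lattice tower located in Paris, France."
inputs = tokenizer(question, context, return_tensors="tf")
outputs = model(**inputs)

# Take the most likely start/end token positions and decode the span between them.
start = int(tf.math.argmax(outputs.start_logits, axis=-1)[0])
end = int(tf.math.argmax(outputs.end_logits, axis=-1)[0])
print(tokenizer.decode(inputs["input_ids"][0, start : end + 1]))
```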
Davlan/bert-base-multilingual-cased-finetuned-naija
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- license: mit tags: - generated_from_trainer metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-peyma-fa results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-peyma-fa This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0937 - F1: 0.9249 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.1562 | 1.0 | 998 | 0.0691 | 0.8777 | | 0.0638 | 2.0 | 1996 | 0.0703 | 0.8908 | | 0.0457 | 3.0 | 2994 | 0.0645 | 0.8975 | | 0.0281 | 4.0 | 3992 | 0.0842 | 0.8994 | | 0.0206 | 5.0 | 4990 | 0.0651 | 0.9164 | | 0.0139 | 6.0 | 5988 | 0.0787 | 0.9148 | | 0.0083 | 7.0 | 6986 | 0.0838 | 0.9253 | | 0.0052 | 8.0 | 7984 | 0.0833 | 0.9221 | | 0.0031 | 9.0 | 8982 | 0.0947 | 0.9230 | | 0.0028 | 10.0 | 9980 | 0.0937 | 0.9249 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.9.1 - Datasets 2.1.0 - Tokenizers 0.12.1
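The NER card above has no inference snippet; the following is a minimal sketch, not taken from the original card. The repo id is an assumption (the card does not state where the checkpoint is published), and the entity labels follow whatever PEYMA tag set was used during fine-tuning.

```python
from transformers import pipeline

# Assumed repo id -- substitute the actual published checkpoint.
ner = pipeline(
    "token-classification",
    model="xlm-roberta-base-finetuned-peyma-fa",
    aggregation_strategy="simple",
)

# Example Persian sentence: "Tehran is the capital of Iran."
for entity in ner("تهران پایتخت ایران است."):
    print(entity["entity_group"], entity["word"], round(float(entity["score"]), 3))
```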
Davlan/bert-base-multilingual-cased-finetuned-swahili
[ "pytorch", "tf", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
67
null
--- license: mit tags: - generated_from_trainer datasets: - squad model-index: - name: roberta-base-finetuned-scrambled-squad-5 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base-finetuned-scrambled-squad-5 This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the squad dataset. It achieves the following results on the evaluation set: - Loss: 1.7078 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 7e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 1.7695 | 1.0 | 5532 | 1.7078 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0+cu113 - Datasets 2.1.0 - Tokenizers 0.12.1
Davlan/distilbert-base-multilingual-cased-masakhaner
[ "pytorch", "tf", "distilbert", "token-classification", "arxiv:2103.11811", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "DistilBertForTokenClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
16
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 285.62 +/- 20.33 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
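A minimal sketch of what the missing usage section could contain, assuming the checkpoint was saved as a standard Stable-Baselines3 zip file; the filename below is a placeholder, not a file name confirmed by this repo.

```python
import gym
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

# Placeholder filename -- download the actual checkpoint from the repo first
# (for example with huggingface_hub.hf_hub_download) and point to it here.
model = PPO.load("ppo-LunarLander-v2.zip")

env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10, deterministic=True)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```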
Davlan/mbart50-large-eng-yor-mt
[ "pytorch", "mbart", "text2text-generation", "arxiv:2103.08647", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MBartForConditionalGeneration" ], "model_type": "mbart", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: distilbert-base-uncased-finetuned-osdg results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-osdg This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.8193 - F1 Score: 0.7962 - Accuracy: 0.8434 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 Score | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:| | 0.3769 | 1.0 | 1017 | 0.8258 | 0.7729 | 0.8257 | | 0.2759 | 2.0 | 2034 | 0.8364 | 0.7773 | 0.8262 | | 0.1412 | 3.0 | 3051 | 1.0203 | 0.7833 | 0.8379 | | 0.1423 | 4.0 | 4068 | 1.1603 | 0.7683 | 0.8224 | | 0.0939 | 5.0 | 5085 | 1.3029 | 0.7843 | 0.8329 | | 0.0757 | 6.0 | 6102 | 1.3562 | 0.7931 | 0.8379 | | 0.0801 | 7.0 | 7119 | 1.2925 | 0.7840 | 0.8395 | | 0.0311 | 8.0 | 8136 | 1.4632 | 0.7750 | 0.8318 | | 0.0263 | 9.0 | 9153 | 1.5760 | 0.7843 | 0.8312 | | 0.0196 | 10.0 | 10170 | 1.5689 | 0.7890 | 0.8417 | | 0.0313 | 11.0 | 11187 | 1.6034 | 0.7909 | 0.8417 | | 0.0007 | 12.0 | 12204 | 1.6725 | 0.7889 | 0.8406 | | 0.0081 | 13.0 | 13221 | 1.6463 | 0.7911 | 0.8395 | | 0.0061 | 14.0 | 14238 | 1.7730 | 0.7861 | 0.8345 | | 0.003 | 15.0 | 15255 | 1.8001 | 0.7847 | 0.8379 | | 0.0002 | 16.0 | 16272 | 1.7328 | 0.7912 | 0.8434 | | 0.0 | 17.0 | 17289 | 1.7914 | 0.8011 | 0.8489 | | 0.0009 | 18.0 | 18306 | 1.7772 | 0.7958 | 0.8456 | | 0.0 | 19.0 | 19323 | 1.8028 | 0.7958 | 0.8434 | | 0.0 | 20.0 | 20340 | 1.8193 | 0.7962 | 0.8434 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
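A usage sketch for the classifier described above (not part of the original card); the repo id is an assumption, and the returned label names depend on how the OSDG classes were mapped during fine-tuning.

```python
from transformers import pipeline

# Assumed repo id -- replace with the actual published checkpoint.
classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-osdg")

text = "Expanding access to affordable and clean energy in rural communities."
# Returns e.g. [{'label': ..., 'score': ...}]; labels follow the fine-tuning config.
print(classifier(text))
```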
Davlan/mt5_base_eng_yor_mt
[ "pytorch", "mt5", "text2text-generation", "arxiv:2103.08647", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MT5ForConditionalGeneration" ], "model_type": "mt5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
2022-05-10T14:48:32Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 273.23 +/- 17.77 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Davlan/xlm-roberta-base-finetuned-wolof
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: distilbert-rater results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-rater This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results ### Framework versions - Transformers 4.16.2 - Pytorch 1.9.1 - Datasets 1.18.4 - Tokenizers 0.11.6
Dazai/Ok
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-05-10T17:31:43Z
--- tags: - opt_metasq --- # This repo lets you run the following checkpoint using facebookresearch/metaseq. Do the following: ## 1. Install PyTorch ``` pip3 install torch==1.10.1+cu113 torchvision==0.11.2+cu113 torchaudio==0.10.1+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html ``` ## 2. Install Megatron ``` git clone https://github.com/patrickvonplaten/Megatron-LM.git cd Megatron-LM pip3 install six regex pip3 install -e . ``` ## 3. Install fairscale ``` git clone https://github.com/facebookresearch/fairscale.git cd fairscale git checkout prefetch_fsdp_params_simple pip3 install -e . ``` ## 4. Install metaseq ``` git clone https://github.com/patrickvonplaten/metaseq.git cd metaseq pip3 install -e . ``` ## 5. Clone this repo (click top right on "How to clone") ## 6. Run the following: ```bash cd <path/to/cloned/repo> bash run.sh ```
Dbluciferm3737/Idk
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-05-10T17:31:55Z
--- tags: - opt_metasq --- # This repo lets you run the following checkpoint using facebookresearch/metaseq. Do the following: ## 1. Install PyTorch ``` pip3 install torch==1.10.1+cu113 torchvision==0.11.2+cu113 torchaudio==0.10.1+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html ``` ## 2. Install Megatron ``` git clone https://github.com/patrickvonplaten/Megatron-LM.git cd Megatron-LM pip3 install six regex pip3 install -e . ``` ## 3. Install fairscale ``` git clone https://github.com/facebookresearch/fairscale.git cd fairscale git checkout prefetch_fsdp_params_simple pip3 install -e . ``` ## 4. Install metaseq ``` git clone https://github.com/patrickvonplaten/metaseq.git cd metaseq pip3 install -e . ``` ## 5. Clone this repo (click top right on "How to clone") ## 6. Run the following: ```bash cd <path/to/cloned/repo> bash run.sh ```
Dbluciferm3737/U
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - opt_metasq --- # This repo lets you run the following checkpoint using facebookresearch/metaseq. Do the following: ## 1. Install PyTorch ``` pip3 install torch==1.10.1+cu113 torchvision==0.11.2+cu113 torchaudio==0.10.1+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html ``` ## 2. Install Megatron ``` git clone https://github.com/patrickvonplaten/Megatron-LM.git cd Megatron-LM pip3 install six regex pip3 install -e . ``` ## 3. Install fairscale ``` git clone https://github.com/facebookresearch/fairscale.git cd fairscale git checkout prefetch_fsdp_params_simple pip3 install -e . ``` ## 4. Install metaseq ``` git clone https://github.com/patrickvonplaten/metaseq.git cd metaseq pip3 install -e . ``` ## 5. Clone this repo (click top right on "How to clone") ## 6. Run the following: ```bash cd <path/to/cloned/repo> bash run.sh ```
Ddarkros/Test
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-05-10T17:32:24Z
--- tags: - opt_metasq --- # This repo lets you run the following checkpoint using facebookresearch/metaseq. Do the following: ## 1. Install PyTorch ``` pip3 install torch==1.10.1+cu113 torchvision==0.11.2+cu113 torchaudio==0.10.1+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html ``` ## 2. Install Megatron ``` git clone https://github.com/patrickvonplaten/Megatron-LM.git cd Megatron-LM pip3 install six regex pip3 install -e . ``` ## 3. Install fairscale ``` git clone https://github.com/facebookresearch/fairscale.git cd fairscale git checkout prefetch_fsdp_params_simple pip3 install -e . ``` ## 4. Install metaseq ``` git clone https://github.com/patrickvonplaten/metaseq.git cd metaseq pip3 install -e . ``` ## 5. Clone this repo (click top right on "How to clone") ## 6. Run the following: ```bash cd <path/to/cloned/repo> bash run.sh ```
DeadBeast/emoBERTTamil
[ "pytorch", "tensorboard", "bert", "text-classification", "dataset:tamilmixsentiment", "transformers", "generated_from_trainer", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
35
2022-05-10T17:38:23Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - common_voice model-index: - name: wav2vec2-large-xls-r-300m-turkish-colab_common_voice-8_6 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-large-xls-r-300m-turkish-colab_common_voice-8_6 This model is a fine-tuned version of [husnu/wav2vec2-large-xls-r-300m-turkish-colab_common_voice-8_5](https://huggingface.co/husnu/wav2vec2-large-xls-r-300m-turkish-colab_common_voice-8_5) on the common_voice dataset. It achieves the following results on the evaluation set: - Loss: 0.3646 - Wer: 0.3478 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 6 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.1024 | 0.51 | 400 | 0.4030 | 0.4171 | | 0.1533 | 1.02 | 800 | 0.4733 | 0.4570 | | 0.1584 | 1.53 | 1200 | 0.4150 | 0.4371 | | 0.1538 | 2.04 | 1600 | 0.4104 | 0.4390 | | 0.1395 | 2.55 | 2000 | 0.3891 | 0.4133 | | 0.1415 | 3.07 | 2400 | 0.3877 | 0.4015 | | 0.1261 | 3.58 | 2800 | 0.3685 | 0.3899 | | 0.1149 | 4.09 | 3200 | 0.3791 | 0.3881 | | 0.1003 | 4.6 | 3600 | 0.3642 | 0.3626 | | 0.0934 | 5.11 | 4000 | 0.3755 | 0.3516 | | 0.0805 | 5.62 | 4400 | 0.3646 | 0.3478 | ### Framework versions - Transformers 4.11.3 - Pytorch 1.10.0+cu113 - Datasets 2.1.0 - Tokenizers 0.10.3
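The ASR card above stops at the training results; a minimal transcription sketch is given below. The repo id is an assumption (inferred from the base-model naming in the card) and the audio path is a placeholder — the model expects 16 kHz mono audio.

```python
from transformers import pipeline

# Assumed repo id and audio path -- substitute the actual checkpoint and a real recording.
asr = pipeline(
    "automatic-speech-recognition",
    model="husnu/wav2vec2-large-xls-r-300m-turkish-colab_common_voice-8_6",
)

print(asr("ornek_kayit.wav")["text"])
```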
DeadBeast/korscm-mBERT
[ "pytorch", "bert", "text-classification", "korean", "dataset:Korean-Sarcasm", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
43
2022-05-10T17:47:22Z
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: distilbert-rater results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-rater This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results ### Framework versions - Transformers 4.16.2 - Pytorch 1.9.1 - Datasets 1.18.4 - Tokenizers 0.11.6
DeadBeast/mbert-base-cased-finetuned-bengali-fakenews
[ "pytorch", "bert", "text-classification", "bengali", "dataset:BanFakeNews", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
37
2022-05-10T18:05:51Z
--- tags: - espnet - audio - text-to-speech language: en datasets: - ryanspeech license: cc-by-nc-4.0 widget: - text: "This seems a very pleasant place, and I think I shall enjoy myself very much." --- ## RyanSpeech model (based on ESPnet2) ### `espnet/english_male_ryanspeech_tacotron` This model was trained by [Rohola Zandie](https://scholar.google.com/citations?user=xv0jIe0AAAAJ&hl=en) using ryanspeech recipe in [espnet](https://github.com/espnet/espnet/). For the best results you need to download the vocoder separately from [here](https://drive.google.com/file/d/10GYvB_mIKzXzSjD67tSnBhknZRoBjsNb/view?usp=sharing) and then use the following code: ``` from espnet2.bin.tts_inference import Text2Speech from scipy.io.wavfile import write model = Text2Speech.from_pretrained( model_file="espnet/english_male_ryanspeech_tacotron", vocoder_file="path_to_vocoder/train_nodev_parallel_wavegan.v1.long/checkpoint-1000000steps.pkl" ) output = model("This is a simple test.") write("x.wav", 22050, output['wav'].numpy()) ``` ## Download the dataset You can download RyanSpeech dataset from [here](https://www.kaggle.com/datasets/roholazandie/ryanspeech) or here. ## TTS config <details><summary>expand</summary> ``` config: conf/train.yaml print_config: false log_level: INFO dry_run: false iterator_type: sequence output_dir: exp/tts_train_raw_phn_tacotron_g2p_en_no_space ngpu: 1 seed: 0 num_workers: 1 num_att_plot: 3 dist_backend: nccl dist_init_method: env:// dist_world_size: null dist_rank: null local_rank: 0 dist_master_addr: null dist_master_port: null dist_launcher: null multiprocessing_distributed: false cudnn_enabled: true cudnn_benchmark: false cudnn_deterministic: true collect_stats: false write_collected_feats: false max_epoch: 200 patience: null val_scheduler_criterion: - valid - loss early_stopping_criterion: - valid - loss - min best_model_criterion: - - valid - loss - min - - train - loss - min keep_nbest_models: 5 grad_clip: 1.0 grad_clip_type: 2.0 grad_noise: false accum_grad: 1 no_forward_run: false resume: true train_dtype: float32 use_amp: false log_interval: null pretrain_path: [] pretrain_key: [] num_iters_per_epoch: 500 batch_size: 20 valid_batch_size: null batch_bins: 5120000 valid_batch_bins: null train_shape_file: - exp/tts_stats_raw_phn_tacotron_g2p_en_no_space/train/text_shape.phn - exp/tts_stats_raw_phn_tacotron_g2p_en_no_space/train/speech_shape valid_shape_file: - exp/tts_stats_raw_phn_tacotron_g2p_en_no_space/valid/text_shape.phn - exp/tts_stats_raw_phn_tacotron_g2p_en_no_space/valid/speech_shape batch_type: numel valid_batch_type: null fold_length: - 150 - 204800 sort_in_batch: descending sort_batch: descending multiple_iterator: false chunk_length: 500 chunk_shift_ratio: 0.5 num_cache_chunks: 1024 train_data_path_and_name_and_type: - - dump/raw/tr_no_dev/text - text - text - - dump/raw/tr_no_dev/wav.scp - speech - sound valid_data_path_and_name_and_type: - - dump/raw/dev/text - text - text - - dump/raw/dev/wav.scp - speech - sound allow_variable_data_keys: false max_cache_size: 0.0 max_cache_fd: 32 valid_max_cache_size: null optim: adam optim_conf: lr: 0.001 eps: 1.0e-06 weight_decay: 0.0 scheduler: null scheduler_conf: {} token_list: - <blank> - <unk> - AH0 - T - N - S - R - D - L - K - IH1 - M - EH1 - Z - DH - UW1 - AE1 - IH0 - AY1 - AH1 - W - . - P - F - IY1 - V - ER0 - AA1 - B - AO1 - HH - EY1 - IY0 - ',' - Y - NG - OW1 - G - AW1 - TH - SH - UH1 - '?' - ER1 - JH - CH - OW0 - OW2 - EH2 - IH2 - EY2 - AA2 - AE2 - AY2 - '''' - OY1 - UW0 - '!' 
- AO2 - EH0 - ZH - AH2 - AE0 - UW2 - AA0 - AY0 - IY2 - AW2 - AO0 - EY0 - ER2 - UH2 - '...' - AW0 - UH0 - OY2 - <sos/eos> odim: null model_conf: {} use_preprocessor: true token_type: phn bpemodel: null non_linguistic_symbols: null cleaner: tacotron g2p: g2p_en_no_space feats_extract: fbank feats_extract_conf: fs: 22050 fmin: 80 fmax: 7600 n_mels: 80 hop_length: 256 n_fft: 1024 win_length: null normalize: global_mvn normalize_conf: stats_file: exp/tts_stats_raw_phn_tacotron_g2p_en_no_space/train/feats_stats.npz tts: tacotron2 tts_conf: embed_dim: 512 elayers: 1 eunits: 512 econv_layers: 3 econv_chans: 512 econv_filts: 5 atype: location adim: 512 aconv_chans: 32 aconv_filts: 15 cumulate_att_w: true dlayers: 2 dunits: 1024 prenet_layers: 2 prenet_units: 256 postnet_layers: 5 postnet_chans: 512 postnet_filts: 5 output_activation: null use_batch_norm: true use_concate: true use_residual: false dropout_rate: 0.5 zoneout_rate: 0.1 reduction_factor: 1 spk_embed_dim: null use_masking: true bce_pos_weight: 5.0 use_guided_attn_loss: true guided_attn_loss_sigma: 0.4 guided_attn_loss_lambda: 1.0 pitch_extract: null pitch_extract_conf: {} pitch_normalize: null pitch_normalize_conf: {} energy_extract: null energy_extract_conf: {} energy_normalize: null energy_normalize_conf: {} required: - output_dir - token_list distributed: false ``` </details> ### Citing RyanSpeech ```BibTex @inproceedings{Zandie2021RyanSpeechAC, title={RyanSpeech: A Corpus for Conversational Text-to-Speech Synthesis}, author={Rohola Zandie and Mohammad H. Mahoor and Julia Madsen and Eshrat S. Emamian}, booktitle={Interspeech}, year={2021} } ```
DeadBeast/roberta-base-pretrained-mr-2
[ "pytorch", "jax", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
2022-05-10T18:13:25Z
--- tags: - espnet - audio - text-to-speech language: en datasets: - ryanspeech license: cc-by-nc-4.0 widget: - text: "This seems a very pleasant place, and I think I shall enjoy myself very much." --- ## RyanSpeech model (based on ESPnet2) ### `espnet/english_male_ryanspeech_fastspeech2` This model was trained by [Rohola Zandie](https://scholar.google.com/citations?user=xv0jIe0AAAAJ&hl=en) using ryanspeech recipe in [espnet](https://github.com/espnet/espnet/). For the best results you need to download the vocoder separately from [here](https://drive.google.com/file/d/10GYvB_mIKzXzSjD67tSnBhknZRoBjsNb/view?usp=sharing) and then use the following code: ``` from espnet2.bin.tts_inference import Text2Speech from scipy.io.wavfile import write model = Text2Speech.from_pretrained( model_file="espnet/english_male_ryanspeech_fastspeech2", vocoder_file="path_to_vocoder/train_nodev_parallel_wavegan.v1.long/checkpoint-1000000steps.pkl" ) output = model("This is a simple test.") write("x.wav", 22050, output['wav'].numpy()) ``` ## Download the dataset You can download RyanSpeech dataset from [here](https://www.kaggle.com/datasets/roholazandie/ryanspeech) or here. ## TTS config <details><summary>expand</summary> ``` config: conf/tuning/train_fastspeech.yaml print_config: false log_level: INFO dry_run: false iterator_type: sequence output_dir: exp/tts_train_fastspeech2_raw_phn_tacotron_g2p_en_no_space ngpu: 1 seed: 0 num_workers: 1 num_att_plot: 3 dist_backend: nccl dist_init_method: env:// dist_world_size: null dist_rank: null local_rank: 0 dist_master_addr: null dist_master_port: null dist_launcher: null multiprocessing_distributed: false cudnn_enabled: true cudnn_benchmark: false cudnn_deterministic: true collect_stats: false write_collected_feats: false max_epoch: 1000 patience: null val_scheduler_criterion: - valid - loss early_stopping_criterion: - valid - loss - min best_model_criterion: - - valid - loss - min - - train - loss - min keep_nbest_models: 5 grad_clip: 1.0 grad_clip_type: 2.0 grad_noise: false accum_grad: 6 no_forward_run: false resume: true train_dtype: float32 use_amp: false log_interval: null pretrain_path: [] pretrain_key: [] num_iters_per_epoch: 500 batch_size: 20 valid_batch_size: null batch_bins: 800000 valid_batch_bins: null train_shape_file: - exp/tts_train_raw_phn_tacotron_g2p_en_no_space/decode_use_teacher_forcingtrue_train.loss.ave/stats/train/text_shape.phn - exp/tts_train_raw_phn_tacotron_g2p_en_no_space/decode_use_teacher_forcingtrue_train.loss.ave/stats/train/speech_shape valid_shape_file: - exp/tts_train_raw_phn_tacotron_g2p_en_no_space/decode_use_teacher_forcingtrue_train.loss.ave/stats/valid/text_shape.phn - exp/tts_train_raw_phn_tacotron_g2p_en_no_space/decode_use_teacher_forcingtrue_train.loss.ave/stats/valid/speech_shape batch_type: numel valid_batch_type: null fold_length: - 150 - 204800 sort_in_batch: descending sort_batch: descending multiple_iterator: false chunk_length: 500 chunk_shift_ratio: 0.5 num_cache_chunks: 1024 train_data_path_and_name_and_type: - - dump/raw/tr_no_dev/text - text - text - - exp/tts_train_raw_phn_tacotron_g2p_en_no_space/decode_use_teacher_forcingtrue_train.loss.ave/tr_no_dev/durations - durations - text_int - - dump/raw/tr_no_dev/wav.scp - speech - sound valid_data_path_and_name_and_type: - - dump/raw/dev/text - text - text - - exp/tts_train_raw_phn_tacotron_g2p_en_no_space/decode_use_teacher_forcingtrue_train.loss.ave/dev/durations - durations - text_int - - dump/raw/dev/wav.scp - speech - sound allow_variable_data_keys: false 
max_cache_size: 0.0 max_cache_fd: 32 valid_max_cache_size: null optim: adam optim_conf: lr: 1.0 scheduler: noamlr scheduler_conf: model_size: 384 warmup_steps: 4000 token_list: - <blank> - <unk> - AH0 - T - N - S - R - D - L - K - IH1 - M - EH1 - Z - DH - UW1 - AE1 - IH0 - AY1 - AH1 - W - . - P - F - IY1 - V - ER0 - AA1 - B - AO1 - HH - EY1 - IY0 - ',' - Y - NG - OW1 - G - AW1 - TH - SH - UH1 - '?' - ER1 - JH - CH - OW0 - OW2 - EH2 - IH2 - EY2 - AA2 - AE2 - AY2 - '''' - OY1 - UW0 - '!' - AO2 - EH0 - ZH - AH2 - AE0 - UW2 - AA0 - AY0 - IY2 - AW2 - AO0 - EY0 - ER2 - UH2 - '...' - AW0 - UH0 - OY2 - <sos/eos> odim: null model_conf: {} use_preprocessor: true token_type: phn bpemodel: null non_linguistic_symbols: null cleaner: tacotron g2p: g2p_en_no_space feats_extract: fbank feats_extract_conf: fs: 22050 fmin: 80 fmax: 7600 n_mels: 80 hop_length: 256 n_fft: 1024 win_length: null normalize: global_mvn normalize_conf: stats_file: exp/tts_train_raw_phn_tacotron_g2p_en_no_space/decode_use_teacher_forcingtrue_train.loss.ave/stats/train/feats_stats.npz tts: fastspeech tts_conf: adim: 384 aheads: 2 elayers: 6 eunits: 1536 dlayers: 6 dunits: 1536 positionwise_layer_type: conv1d positionwise_conv_kernel_size: 3 duration_predictor_layers: 2 duration_predictor_chans: 384 duration_predictor_kernel_size: 3 postnet_layers: 5 postnet_filts: 5 postnet_chans: 256 use_masking: true use_scaled_pos_enc: true encoder_normalize_before: true decoder_normalize_before: true reduction_factor: 1 init_type: xavier_uniform init_enc_alpha: 1.0 init_dec_alpha: 1.0 transformer_enc_dropout_rate: 0.1 transformer_enc_positional_dropout_rate: 0.1 transformer_enc_attn_dropout_rate: 0.1 transformer_dec_dropout_rate: 0.1 transformer_dec_positional_dropout_rate: 0.1 transformer_dec_attn_dropout_rate: 0.1 pitch_extract: null pitch_extract_conf: {} pitch_normalize: null pitch_normalize_conf: {} energy_extract: null energy_extract_conf: {} energy_normalize: null energy_normalize_conf: {} required: - output_dir - token_list distributed: false ``` </details> ### Citing RyanSpeech ```BibTex @inproceedings{Zandie2021RyanSpeechAC, title={RyanSpeech: A Corpus for Conversational Text-to-Speech Synthesis}, author={Rohola Zandie and Mohammad H. Mahoor and Julia Madsen and Eshrat S. Emamian}, booktitle={Interspeech}, year={2021} } ```
DeadBeast/roberta-base-pretrained-mr
[ "jax", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
2022-05-10T18:15:33Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - rouge model-index: - name: distilbart-cnn-arxiv-pubmed-pubmed-v3-e16 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbart-cnn-arxiv-pubmed-pubmed-v3-e16 This model is a fine-tuned version of [theojolliffe/distilbart-cnn-arxiv-pubmed-pubmed](https://huggingface.co/theojolliffe/distilbart-cnn-arxiv-pubmed-pubmed) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.8306 - Rouge1: 56.4519 - Rouge2: 41.6818 - Rougel: 44.7833 - Rougelsum: 54.6359 - Gen Len: 141.9815 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 16 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:--------:| | No log | 1.0 | 398 | 1.1157 | 50.9487 | 31.3005 | 34.0145 | 48.6057 | 141.8519 | | 1.3569 | 2.0 | 796 | 0.9688 | 53.0653 | 34.1855 | 37.0759 | 50.5942 | 141.2963 | | 0.8704 | 3.0 | 1194 | 0.9053 | 53.9684 | 36.0388 | 38.6674 | 51.9604 | 142.0 | | 0.6287 | 4.0 | 1592 | 0.8515 | 54.2379 | 36.4915 | 39.1393 | 51.6991 | 141.4074 | | 0.6287 | 5.0 | 1990 | 0.8274 | 53.6806 | 34.8373 | 37.7369 | 51.239 | 141.6481 | | 0.465 | 6.0 | 2388 | 0.8486 | 55.2534 | 39.1757 | 41.6366 | 53.2989 | 141.9259 | | 0.3432 | 7.0 | 2786 | 0.8116 | 54.539 | 37.6314 | 40.5531 | 52.1997 | 141.3889 | | 0.2577 | 8.0 | 3184 | 0.7976 | 54.8212 | 36.8347 | 40.6768 | 52.7785 | 142.0 | | 0.204 | 9.0 | 3582 | 0.8010 | 53.9302 | 37.3523 | 40.135 | 52.139 | 141.7778 | | 0.204 | 10.0 | 3980 | 0.8168 | 54.3151 | 38.0665 | 42.4112 | 52.4682 | 142.0 | | 0.1663 | 11.0 | 4378 | 0.8171 | 54.7027 | 38.3117 | 42.0196 | 52.8821 | 142.0 | | 0.135 | 12.0 | 4776 | 0.8202 | 54.1035 | 37.9154 | 40.7676 | 52.2509 | 142.0 | | 0.1102 | 13.0 | 5174 | 0.8204 | 56.223 | 41.0947 | 44.0131 | 54.3353 | 142.0 | | 0.0928 | 14.0 | 5572 | 0.8280 | 56.1637 | 41.0408 | 44.2931 | 54.5488 | 142.0 | | 0.0928 | 15.0 | 5970 | 0.8273 | 56.2608 | 41.3855 | 44.4432 | 54.5778 | 142.0 | | 0.0847 | 16.0 | 6368 | 0.8306 | 56.4519 | 41.6818 | 44.7833 | 54.6359 | 141.9815 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0+cu113 - Datasets 2.2.0 - Tokenizers 0.12.1
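No inference example is given in the summarization card above; a minimal sketch follows. The repo id is an assumption based on the card's base-model naming, and the generation lengths are illustrative rather than taken from the card.

```python
from transformers import pipeline

# Assumed repo id -- the card does not state where this fine-tuned checkpoint is published.
summarizer = pipeline(
    "summarization",
    model="theojolliffe/distilbart-cnn-arxiv-pubmed-pubmed-v3-e16",
)

report = (
    "The project delivered a new data pipeline that reduced processing time from six hours "
    "to forty minutes, and the rollout completed without downtime across all three regions. "
    "Remaining risks include incomplete test coverage and a dependency on a deprecated storage API."
)
print(summarizer(report, max_length=60, min_length=20, do_sample=False)[0]["summary_text"])
```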
Dean/summarsiation
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - et widget: - text: "te olete ka noh, noh, päris korralikult ka Rahvusringhäälingu teatud mõttes sellisesse keerulisse olukorda pannud," - text: "Et, et, et miks mitte olla siis tasakaalus, ma noh, hüpoteetiliselt viskan selle palli üles," --- Model usage: ``` tokenizer = MBart50Tokenizer.from_pretrained("IljaSamoilov/MBART-estonian-subtitles", src_lang="et_EE", tgt_lang="et_EE") model = MBartForConditionalGeneration.from_pretrained("IljaSamoilov/MBART-estonian-subtitles") ```
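The usage snippet above stops after loading the tokenizer and model; a possible continuation that actually generates a subtitle-style output is sketched below. The example sentence comes from the card's widget, while the beam size, max length, and forced target-language token are illustrative choices, not settings stated in the card.

```python
from transformers import MBart50Tokenizer, MBartForConditionalGeneration

tokenizer = MBart50Tokenizer.from_pretrained(
    "IljaSamoilov/MBART-estonian-subtitles", src_lang="et_EE", tgt_lang="et_EE"
)
model = MBartForConditionalGeneration.from_pretrained("IljaSamoilov/MBART-estonian-subtitles")

text = "te olete ka noh, noh, päris korralikult ka Rahvusringhäälingu teatud mõttes sellisesse keerulisse olukorda pannud,"
inputs = tokenizer(text, return_tensors="pt")
generated = model.generate(
    **inputs,
    forced_bos_token_id=tokenizer.lang_code_to_id["et_EE"],  # target-language token for mBART-50
    num_beams=5,
    max_length=128,
)
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])
```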
Declan/Breitbart_model_v2
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2022-05-10T18:23:41Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 289.14 +/- 17.41 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Declan/Breitbart_model_v6
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
2022-05-10T18:46:07Z
# Basic Information This is the Dr. Decr model used in the XOR-TyDi leaderboard task 1 whitebox submission. https://nlp.cs.washington.edu/xorqa/ The detailed implementation of the model can be found in: https://arxiv.org/pdf/2112.08185.pdf Source code to train the model can be found via PrimeQA's IR component: https://github.com/primeqa/primeqa/tree/main/examples/drdecr It is a neural IR model built on top of the ColBERTv1 API and is not directly compatible with the Hugging Face API. The inference results on the XOR Dev dataset are: ``` R@2kt R@5kt te 66.67 70.88 bn 70.23 75.08 fi 82.24 86.18 ja 65.92 72.93 ko 67.93 71.73 ru 63.07 69.71 ar 78.15 82.77 Avg 70.60 75.61 ``` # Limitations and Bias This model uses the pre-trained XLM-R base model and was fine-tuned on the 7 languages of the XOR-TyDi leaderboard. Performance on other languages was not tested. Since the model was fine-tuned on a large pre-trained language model, XLM-Roberta, biases associated with the pre-existing XLM-Roberta model may be present in our fine-tuned model, Dr. Decr. # Citation ``` @article{Li2021_DrDecr, doi = {10.48550/ARXIV.2112.08185}, url = {https://arxiv.org/abs/2112.08185}, author = {Li, Yulong and Franz, Martin and Sultan, Md Arafat and Iyer, Bhavani and Lee, Young-Suk and Sil, Avirup}, keywords = {Computation and Language (cs.CL), Artificial Intelligence (cs.AI), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Learning Cross-Lingual IR from an English Retriever}, publisher = {arXiv}, year = {2021} } ```
Declan/CNN_model_v1
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2022-05-10T19:03:07Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 238.37 +/- 65.78 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Declan/CNN_model_v2
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: mit tags: - generated_from_trainer datasets: - squad model-index: - name: roberta-base-finetuned-scrambled-squad-15 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base-finetuned-scrambled-squad-15 This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the squad dataset. It achieves the following results on the evaluation set: - Loss: 1.8722 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 7e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 1.8944 | 1.0 | 5590 | 1.8722 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0+cu113 - Datasets 2.1.0 - Tokenizers 0.12.1
Declan/CNN_model_v4
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
2022-05-10T19:16:46Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 224.96 +/- 73.06 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Declan/CNN_model_v7
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-05-10T19:23:18Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 238.83 +/- 22.61 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Declan/ChicagoTribune_model_v2
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: mlp results: - metrics: - type: mean_reward value: 274.83 +/- 24.24 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **mlp** Agent playing **LunarLander-v2** This is a trained model of a **mlp** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Declan/ChicagoTribune_model_v3
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
2022-05-10T19:34:58Z
--- tags: - generated_from_trainer model-index: - name: tests-finetuned-squad-full results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # tests-finetuned-squad-full This model is a fine-tuned version of [sberbank-ai/sbert_large_nlu_ru](https://huggingface.co/sberbank-ai/sbert_large_nlu_ru) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.5672 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 1.0601 | 1.0 | 11307 | 1.0849 | | 0.6918 | 2.0 | 22614 | 1.1588 | | 0.4071 | 3.0 | 33921 | 1.5672 | ### Framework versions - Transformers 4.19.0.dev0 - Pytorch 1.11.0+cu113 - Datasets 2.2.2.dev0 - Tokenizers 0.12.1
Declan/ChicagoTribune_model_v4
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2022-05-10T19:37:10Z
--- license: mit tags: - generated_from_trainer datasets: - keyword_pubmed_dataset metrics: - accuracy model-index: - name: kw_pubmed_1000_0.0003 results: - task: name: Masked Language Modeling type: fill-mask dataset: name: keyword_pubmed_dataset type: keyword_pubmed_dataset args: sentence metrics: - name: Accuracy type: accuracy value: 0.33938523162661094 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # kw_pubmed_1000_0.0003 This model is a fine-tuned version of [microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext](https://huggingface.co/microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext) on the keyword_pubmed_dataset dataset. It achieves the following results on the evaluation set: - Loss: 4.7086 - Accuracy: 0.3394 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 250 - total_train_batch_size: 8000 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | No log | 0.09 | 4 | 4.3723 | 0.3436 | | 6.0386 | 0.17 | 8 | 4.2113 | 0.3442 | | 3.7573 | 0.26 | 12 | 4.2079 | 0.3634 | | 2.9944 | 0.35 | 16 | 4.3370 | 0.3513 | | 2.7048 | 0.44 | 20 | 4.8594 | 0.3067 | | 2.7048 | 0.52 | 24 | 4.4929 | 0.3383 | | 2.9458 | 0.61 | 28 | 4.5146 | 0.3408 | | 2.3783 | 0.7 | 32 | 4.5680 | 0.3430 | | 2.2485 | 0.78 | 36 | 4.5095 | 0.3477 | | 2.1701 | 0.87 | 40 | 4.4971 | 0.3449 | | 2.1701 | 0.96 | 44 | 4.7051 | 0.3321 | | 2.0861 | 1.07 | 48 | 4.7615 | 0.3310 | | 2.4168 | 1.15 | 52 | 4.7086 | 0.3394 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
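A hedged sketch of querying the fine-tuned masked LM; the model identifier is a placeholder, since the card only gives the run name "kw_pubmed_1000_0.0003".

```python
from transformers import pipeline

# Placeholder identifier -- the card only gives the run name "kw_pubmed_1000_0.0003".
fill_mask = pipeline("fill-mask", model="your-namespace/kw_pubmed_1000_0.0003")

# PubMedBERT-style checkpoints use the [MASK] token.
for pred in fill_mask("The patient was treated with [MASK] for hypertension."):
    print(pred["token_str"], round(pred["score"], 3))
```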
Declan/ChicagoTribune_model_v8
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2022-05-10T19:52:23Z
--- tags: - generated_from_trainer model-index: - name: zh-finetuned-squad-qa-minilmv2-32 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # zh-finetuned-squad-qa-minilmv2-32 This model is a fine-tuned version of [subhasisj/zh-TAPT-MLM-MiniLM](https://huggingface.co/subhasisj/zh-TAPT-MLM-MiniLM) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.4311 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 8 - total_train_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 5 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 338 | 2.3706 | | 3.1254 | 2.0 | 676 | 1.7422 | | 1.6449 | 3.0 | 1014 | 1.5323 | | 1.6449 | 4.0 | 1352 | 1.4375 | | 1.3122 | 5.0 | 1690 | 1.4311 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0+cu113 - Datasets 2.2.0 - Tokenizers 0.12.1
Declan/FoxNews_model_v2
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
2022-05-10T20:05:10Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 281.46 +/- 17.95 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Declan/FoxNews_model_v3
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2022-05-10T20:06:22Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 228.05 +/- 22.63 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Declan/FoxNews_model_v4
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2022-05-10T20:11:36Z
--- license: mit tags: - generated_from_keras_callback model-index: - name: vanichandna/indic-bert-finetuned-squad results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # vanichandna/indic-bert-finetuned-squad This model is a fine-tuned version of [ai4bharat/indic-bert](https://huggingface.co/ai4bharat/indic-bert) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 1.0802 - Epoch: 3 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 21984, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: mixed_float16 ### Training results | Train Loss | Epoch | |:----------:|:-----:| | 1.8468 | 0 | | 1.4510 | 1 | | 1.2435 | 2 | | 1.0802 | 3 | ### Framework versions - Transformers 4.18.0 - TensorFlow 2.8.0 - Datasets 2.2.0 - Tokenizers 0.12.1
Declan/FoxNews_model_v5
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2022-05-10T20:13:04Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 266.98 +/- 12.23 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Declan/HuffPost_model_v2
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
2022-05-10T20:34:51Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 283.12 +/- 15.88 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Declan/HuffPost_model_v5
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 287.12 +/- 20.40 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Declan/HuffPost_model_v6
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- license: mit tags: - generated_from_keras_callback model-index: - name: KenP/codeparrot-ds results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # KenP/codeparrot-ds This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 10.3900 - Validation Loss: 9.6171 - Epoch: 0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'WarmUp', 'config': {'initial_learning_rate': 5e-05, 'decay_schedule_fn': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 5e-05, 'decay_steps': -922, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}, '__passive_serialization__': True}, 'warmup_steps': 1000, 'power': 1.0, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: mixed_float16 ### Training results | Train Loss | Validation Loss | Epoch | |:----------:|:---------------:|:-----:| | 10.3900 | 9.6171 | 0 | ### Framework versions - Transformers 4.18.0 - TensorFlow 2.8.0 - Datasets 2.2.0 - Tokenizers 0.12.1
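A hedged usage sketch: the repo id comes from the card's title, but its availability on the Hub and the presence of TensorFlow weights (the run used Keras) are assumptions.

```python
from transformers import pipeline

# "KenP/codeparrot-ds" is the name reported in the card; its availability on the Hub
# and the presence of TensorFlow weights are assumptions, not confirmed by the card.
generator = pipeline("text-generation", model="KenP/codeparrot-ds", framework="tf")

prompt = "def load_dataset(path):"
print(generator(prompt, max_length=64, num_return_sequences=1)[0]["generated_text"])
```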
Declan/HuffPost_model_v8
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2022-05-10T20:49:01Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 289.86 +/- 15.74 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Declan/NPR_model_v1
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 252.23 +/- 33.93 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Declan/NPR_model_v3
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
2022-05-10T21:03:08Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 250.14 +/- 16.42 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Declan/NPR_model_v4
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
2022-05-10T21:27:07Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - rouge model-index: - name: distilbart-cnn-arxiv-pubmed-pubmed-v3-e8 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbart-cnn-arxiv-pubmed-pubmed-v3-e8 This model is a fine-tuned version of [theojolliffe/distilbart-cnn-arxiv-pubmed-pubmed](https://huggingface.co/theojolliffe/distilbart-cnn-arxiv-pubmed-pubmed) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.8422 - Rouge1: 54.9328 - Rouge2: 36.7154 - Rougel: 39.5674 - Rougelsum: 52.4889 - Gen Len: 142.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 8 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:--------:| | No log | 1.0 | 398 | 1.1158 | 50.9754 | 30.9416 | 33.9908 | 48.4925 | 142.0 | | 1.3585 | 2.0 | 796 | 0.9733 | 52.7954 | 33.8196 | 36.7836 | 50.4929 | 141.9259 | | 0.8785 | 3.0 | 1194 | 0.9142 | 53.5548 | 35.3954 | 37.4787 | 51.1024 | 142.0 | | 0.6485 | 4.0 | 1592 | 0.8666 | 52.6449 | 34.0018 | 37.5391 | 50.428 | 141.4074 | | 0.6485 | 5.0 | 1990 | 0.8458 | 53.8913 | 35.4481 | 38.1552 | 51.3737 | 141.8889 | | 0.4993 | 6.0 | 2388 | 0.8571 | 54.7333 | 36.8173 | 40.228 | 52.5574 | 141.9444 | | 0.3957 | 7.0 | 2786 | 0.8455 | 54.9826 | 37.9674 | 40.5786 | 52.5968 | 141.9815 | | 0.328 | 8.0 | 3184 | 0.8422 | 54.9328 | 36.7154 | 39.5674 | 52.4889 | 142.0 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0+cu113 - Datasets 2.2.0 - Tokenizers 0.12.1
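A hedged summarization sketch; the model identifier below is a placeholder because the card only reports the run name, not the full Hub namespace.

```python
from transformers import pipeline

# Placeholder identifier -- the card only reports the run name, not the full Hub namespace.
summarizer = pipeline("summarization", model="your-namespace/distilbart-cnn-arxiv-pubmed-pubmed-v3-e8")

article = (
    "The project delivered a new reporting pipeline over three quarters. "
    "Early testing uncovered data-quality issues that were resolved before rollout, "
    "and the final deployment reduced manual processing time substantially."
)
summary = summarizer(article, max_length=60, min_length=10, do_sample=False)
print(summary[0]["summary_text"])
```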
Declan/NPR_model_v5
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2022-05-10T21:40:24Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 159.15 +/- 61.12 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Declan/NPR_model_v6
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
2022-05-10T21:44:09Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 267.52 +/- 11.83 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Declan/NewYorkPost_model_v1
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-05-10T21:52:11Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 245.12 +/- 56.29 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Declan/Politico_model_v1
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- tags: - fastai --- # Amazing! 🥳 Congratulations on hosting your fastai model on the Hugging Face Hub! # Some next steps 1. Fill out this model card with more information (see the template below and the [documentation here](https://huggingface.co/docs/hub/model-repos))! 2. Create a demo in Gradio or Streamlit using 🤗 Spaces ([documentation here](https://huggingface.co/docs/hub/spaces)). 3. Join the fastai community on the [Fastai Discord](https://discord.com/invite/YKrxeNn)! Greetings fellow fastlearner 🤝! Don't forget to delete this content from your model card. --- # Model card ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed
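A hedged loading sketch using the Hub's fastai integration; the repo id and sample input are placeholders, not values from this card.

```python
from huggingface_hub import from_pretrained_fastai

# Placeholder repo id and input -- replace with the actual fastai model repo and a real sample.
learner = from_pretrained_fastai("your-namespace/your-fastai-model")
prediction = learner.predict("a sample input in whatever format the learner expects")
print(prediction)
```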
Declan/Politico_model_v5
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2022-05-10T23:04:00Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 219.43 +/- 72.12 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Declan/Politico_model_v6
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 285.06 +/- 23.34 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Declan/Politico_model_v8
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2022-05-10T23:17:10Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 299.86 +/- 20.60 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Declan/Reuters_model_v1
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
2022-05-10T23:19:28Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 293.97 +/- 11.26 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Declan/Reuters_model_v4
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
2022-05-10T23:23:03Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 299.81 +/- 18.80 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Declan/Reuters_model_v5
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
2022-05-10T23:25:05Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 293.91 +/- 13.82 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Declan/WallStreetJournal_model_v3
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
2022-05-10T23:33:17Z
|              | precision | recall   | f1-score | support |
|:-------------|----------:|---------:|---------:|--------:|
| negative     | 0.733238  | 0.778788 | 0.755327 | 660     |
| neutral      | 0.757962  | 0.779963 | 0.768805 | 1068    |
| positive     | 0.722793  | 0.728778 | 0.725773 | 483     |
| skip         | 0.660714  | 0.501355 | 0.570108 | 369     |
| speech       | 0.767857  | 0.868687 | 0.815166 | 99      |
| accuracy     |           |          | 0.735349 | 2679    |
| macro avg    | 0.728513  | 0.731514 | 0.727036 | 2679    |
| weighted avg | 0.732501  | 0.735349 | 0.732071 | 2679    |

Avg macro Precision 0.7297744200895245
Avg macro Recall 0.7248163039465004
Avg macro F1 0.7229310729744304
Avg weighted F1 0.7281243075011377
Declan/test_push
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-05-10T23:46:11Z
--- library_name: stable-baselines3 tags: - BipedalWalker-v3 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 185.82 +/- 92.04 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: BipedalWalker-v3 type: BipedalWalker-v3 --- # **PPO** Agent playing **BipedalWalker-v3** This is a trained model of a **PPO** agent playing **BipedalWalker-v3** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
DeepPavlov/bert-base-multilingual-cased-sentence
[ "pytorch", "jax", "bert", "feature-extraction", "multilingual", "arxiv:1704.05426", "arxiv:1809.05053", "arxiv:1908.10084", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
140
2022-05-11T00:35:40Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 143.30 +/- 118.69 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Denver/distilbert-base-uncased-finetuned-squad
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: -3.67 +/- 28.18 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
DeskDown/MarianMixFT_en-my
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
---
license: apache-2.0
---

[Qianyan — AdvertiseGen ad-copy generation dataset](https://www.luge.ai/#/luge/dataDetail?id=9)

> Only .bin (PyTorch) weights are supported.

Fine-tuned for 5 epochs on this Qianyan dataset.

```python
input_text = '类型#裙*材质#针织*风格#简约*风格#青春*风格#清新*风格#性感*图案#条纹*图案#撞色*裙下摆#开叉*裙长#连衣裙*裙款式#拼接*裙款式#吊带'
output_text = gen_ads(input_text)
output_text = output_text.replace(' ', '')
output_text = output_text[len(input_text):]
output_text
```

Output (in practice, take care to control max_length):

```python
output_text='夏天穿的针织衫,搭配简约上衣+牛仔裙,一下子就活泼起来了好吧,就这么简约的蓝色衬托出女性优雅的气质,搭出一派优雅女人味,让人印象深刻哦~好了,今天是秋天来了,天气凉了,是不是该穿上针织呢,秋天会是一个充满阳光的日子呢?让我们一起去看看今天的穿搭吧!首先是白色风衣,其次是棉质风衣。在秋天我们应该穿丝缎或者花边,这种比较清新的风格一定不会让人觉得很成熟,而且又是简约款式,显得自然、有气质。再就是皮草风衣啦,一件白皮草+一件牛仔+两件棉纱的搭配就很潮'
```
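The snippet above calls a `gen_ads` helper that is not defined in the card; below is a minimal sketch of what it might look like, assuming the fine-tuned checkpoint is a causal LM saved locally. The path, architecture, and generation settings are all assumptions, not values taken from the card.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical local path to the fine-tuned PyTorch (.bin) checkpoint directory.
MODEL_DIR = "./advertisegen-finetuned"

tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR)
model = AutoModelForCausalLM.from_pretrained(MODEL_DIR)

def gen_ads(text, max_length=256):
    """Generate ad copy conditioned on the structured keyword string."""
    inputs = tokenizer(text, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        max_length=max_length,
        do_sample=True,
        top_p=0.9,
        repetition_penalty=1.2,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
```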
DeskDown/MarianMix_en-ja-10
[ "pytorch", "tensorboard", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
2022-05-11T02:17:54Z
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: flat_N_max results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # flat_N_max This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.8536 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 1.2462 | 1.0 | 2213 | 1.7958 | | 0.9293 | 2.0 | 4426 | 1.8093 | | 0.7249 | 3.0 | 6639 | 1.8536 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0+cu113 - Datasets 2.2.0 - Tokenizers 0.12.1
DeskDown/MarianMix_en-zh-10
[ "pytorch", "tensorboard", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: mit tags: - generated_from_trainer datasets: - xtreme metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-de results: - task: name: Token Classification type: token-classification dataset: name: xtreme type: xtreme args: PAN-X.de metrics: - name: F1 type: f1 value: 0.8620945214069894 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-de This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset. It achieves the following results on the evaluation set: - Loss: 0.1372 - F1: 0.8621 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.2575 | 1.0 | 525 | 0.1621 | 0.8292 | | 0.1287 | 2.0 | 1050 | 0.1378 | 0.8526 | | 0.0831 | 3.0 | 1575 | 0.1372 | 0.8621 | ### Framework versions - Transformers 4.11.3 - Pytorch 1.11.0+cu113 - Datasets 1.16.1 - Tokenizers 0.10.3
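A hedged inference sketch for this NER fine-tune; the model identifier is a placeholder, since the card only gives the run name.

```python
from transformers import pipeline

# Placeholder identifier -- the card only gives the run name "xlm-roberta-base-finetuned-panx-de".
ner = pipeline(
    "token-classification",
    model="your-namespace/xlm-roberta-base-finetuned-panx-de",
    aggregation_strategy="simple",
)

print(ner("Jeff Dean arbeitet bei Google in Kalifornien."))
```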
Despin89/test
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: -126.43 +/- 27.04 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
DevsIA/Devs_IA
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 296.42 +/- 11.35 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 ---
Dhruva/Interstellar
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 105.84 +/- 83.18 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
DicoTiar/wisdomfiy
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: NER_EHR_Spanish_model_Mulitlingual_BERT results: [] widget: - text: 'Presentamos el caso de una mujer de 30 años, fumadora de 20 cigarrillos/día y sin otros antecedentes personales de interés. La paciente refiere infecciones urinarias de repetición. Se indica realización de ecografía abdominal, observándose una lesión nodular intravesical, por lo que es derivada a consulta de urología. En cistoscopia se visualiza tumoración exofítica de 3x3 cms. en cara lateral derecha con mucosa vesical íntegra, no encontrándose alteraciones en el resto de la vejiga. Se realiza exploración bajo anestesia (EBA) y resección transuretral de dicha lesión (RTU). En el informe de anatomía patológica macroscópicamente se describen fragmentos de pared vesical con urotelio conservado sin displasia, destacando en la capa muscular propia y en continuidad con el tejido muscular de la misma, una tumoración fusocelular con células que muestran unos núcleos de gran tamaño, pleomórficos, de aspecto vesiculoso y unos citoplasmas amplios eosinófilos. Esta celularidad se dispone en formas de fascículos mal definidos y entre la misma se reconoce abundante celularidad constituida fundamentalmente por numerosas células plasmáticas y leucocitos polimorfonucleares eosinófilos. No se observa un índice mitótico elevado, aunque el índice de proliferación medido como positividad nuclear con anticuerpos frente a MIB-1 se encuentra entre el 10 y el 25% de la celularidad tumoral. No se han objetivado áreas de necrosis. En estudio inmunohistoquímico se observa marcada positividad frente a citoqueratinas (AE1/AE3) y CAM5.2 a nivel citoplasmático, así como una marcada positividad citoplasmática con anticuerpos frente a p80 (proteína ALK). La celularidad descrita ha resultado negativa con anticuerpos frente a músculo liso (actina de músculo liso, MyO D1 y Calretinina), así como para CEA y citoqueratinas de alto peso molecular, observándose tan sólo positividad focal y aislada frente a EMA. Tras realización de FISH sobre material parafinado no se evidencia traslocación en el gen de la ALK. El diagnóstico anatomopatológico definitivo es tumor miofibroblástico inflamatorio vesical.' --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # NER_EHR_Spanish_model_Mulitlingual_BERT This model is a fine-tuned version of [bert-base-multilingual-cased](https://huggingface.co/bert-base-multilingual-cased) on the DisTEMIST shared task 2022 dataset. 
It is available at: https://temu.bsc.es/distemist/category/data/ It achieves the following results on the evaluation set: - Loss: 0.2603 - Precision: 0.5637 - Recall: 0.5801 - F1: 0.5718 - Accuracy: 0.9534 ## Model description For a complete description of our system, please go to: https://ceur-ws.org/Vol-3180/paper-26.pdf ## Training and evaluation data Dataset provided by DisTEMIST shared task, it is available at: https://temu.bsc.es/distemist/category/data/ ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 7 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 71 | 0.2060 | 0.5017 | 0.5540 | 0.5266 | 0.9496 | | No log | 2.0 | 142 | 0.2163 | 0.5363 | 0.5433 | 0.5398 | 0.9495 | | No log | 3.0 | 213 | 0.2245 | 0.5521 | 0.5356 | 0.5438 | 0.9514 | | No log | 4.0 | 284 | 0.2453 | 0.5668 | 0.5985 | 0.5822 | 0.9522 | | No log | 5.0 | 355 | 0.2433 | 0.5657 | 0.5579 | 0.5617 | 0.9530 | | No log | 6.0 | 426 | 0.2553 | 0.5762 | 0.5762 | 0.5762 | 0.9536 | | No log | 7.0 | 497 | 0.2603 | 0.5637 | 0.5801 | 0.5718 | 0.9534 | ### How to cite this work: Tamayo, A., Burgos, D. A., & Gelbukh, A. (2022). mbert and simple post-processing: A baseline for disease mention detection in spanish. In Working Notes of Conference and Labs of the Evaluation (CLEF) Forum. CEUR Workshop Proceedings. @inproceedings{tamayo2022mbert, title={mbert and simple post-processing: A baseline for disease mention detection in spanish}, author={Tamayo, Antonio and Burgos, Diego A and Gelbukh, Alexander}, booktitle={Working Notes of Conference and Labs of the Evaluation (CLEF) Forum. CEUR Workshop Proceedings}, year={2022} } ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0+cu113 - Datasets 2.2.0 - Tokenizers 0.12.1
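The card gives training details but no inference example. The sketch below shows one plausible way to tag a clinical sentence with the fine-tuned multilingual BERT checkpoint; the repo id is a placeholder, the example sentence is taken from the widget text above, and longer notes would need to be split or truncated to the 512-token limit.

```python
import torch
from transformers import AutoModelForTokenClassification, AutoTokenizer

# Placeholder repo id: point this at the actual DisTEMIST fine-tuned checkpoint.
model_id = "<user>/NER_EHR_Spanish_model_Mulitlingual_BERT"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForTokenClassification.from_pretrained(model_id)

text = "La paciente refiere infecciones urinarias de repetición."
inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512)

with torch.no_grad():
    logits = model(**inputs).logits

# Map each token to its highest-scoring label.
predictions = logits.argmax(dim=-1)[0]
tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
for token, label_id in zip(tokens, predictions):
    print(token, model.config.id2label[int(label_id)])
```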
Doiman/DialoGPT-medium-harrypotter
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
2022-05-11T04:00:17Z
DongHyoungLee/distilbert-base-uncased-finetuned-cola
[ "pytorch", "tensorboard", "distilbert", "text-classification", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
2022-05-11T04:19:42Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: single_label_N_max results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # single_label_N_max This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the squad dataset. It achieves the following results on the evaluation set: - Loss: 1.9326 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 2.9746 | 1.0 | 674 | 2.0265 | | 1.6756 | 2.0 | 1348 | 1.9134 | | 1.1333 | 3.0 | 2022 | 1.9326 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0+cu113 - Datasets 2.2.0 - Tokenizers 0.12.1
DongHyoungLee/kogpt2-base-v2-finetuned-kogpt2_nsmc_single_sentence_classification
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 215.32 +/- 46.32 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Dongmin/testmodel
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
11
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 173.71 +/- 111.75 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Waynehillsdev/Wayne_NLP_mT5
[ "pytorch", "tensorboard", "mt5", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MT5ForConditionalGeneration" ], "model_type": "mt5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
2022-05-11T05:31:27Z
--- license: afl-3.0 --- SalamaThanks Transformer for English-to-Filipino Text Translation version 1. Based on the Helsinki-NLP/opus-mt-en-tl transformer model.
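As a rough illustration of how a Marian-based checkpoint like this is typically used, the sketch below runs it through the Transformers translation pipeline. The repo id is a placeholder (the card does not give the full path), and the example input and output are invented for illustration.

```python
from transformers import pipeline

# Placeholder repo id: replace with the actual SalamaThanks v1 English-to-Filipino checkpoint.
translator = pipeline("translation", model="<user>/salamathanks-en-fil-v1")

result = translator("Thank you very much for your help.", max_length=128)
print(result[0]["translation_text"])  # illustrative output, e.g. "Maraming salamat sa iyong tulong."
```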
Doogie/Waynehills-KE-T5-doogie
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: afl-3.0 --- SalamaThanks Transformer for Filipino-to-English Text Translation version 1. Based on the Helsinki-NLP/opus-mt-tl-en transformer model.
Waynehillsdev/Waynehills-STT-doogie-server
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers", "generated_from_trainer", "license:apache-2.0" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
61
null
--- license: afl-3.0 --- SalamaThanks Transformer for English-to-Filipino Text Translation version 2. A finetuned transformer model based on the Helsinki-NLP/opus-mt-en-tl transformer model.
Waynehillsdev/waynehills_sentimental_kor
[ "pytorch", "electra", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "ElectraForSequenceClassification" ], "model_type": "electra", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
33
2022-05-11T05:42:28Z
--- license: afl-3.0 --- SalamaThanks Transformer for Filipino-to-English Text Translation version 2. A finetuned model based on the Helsinki-NLP/opus-mt-tl-en transformer model.
Doohae/p_encoder
[ "pytorch" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: unique_N_max results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # unique_N_max This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.7409 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 2.0901 | 1.0 | 1162 | 1.8326 | | 1.5479 | 2.0 | 2324 | 1.7201 | | 1.2903 | 3.0 | 3486 | 1.7409 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0+cu113 - Datasets 2.2.0 - Tokenizers 0.12.1
Doohae/roberta
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # {MODEL_NAME} This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') model = AutoModel.from_pretrained('{MODEL_NAME}') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `sentence_transformers.datasets.NoDuplicatesDataLoader.NoDuplicatesDataLoader` of length 39289 with parameters: ``` {'batch_size': 8} ``` **Loss**: `sentence_transformers.losses.MultipleNegativesRankingLoss.MultipleNegativesRankingLoss` with parameters: ``` {'scale': 20.0, 'similarity_fct': 'cos_sim'} ``` Parameters of the fit()-Method: ``` { "epochs": 1, "evaluation_steps": 0, "evaluator": "sentence_transformers.evaluation.EmbeddingSimilarityEvaluator.EmbeddingSimilarityEvaluator", "max_grad_norm": 1, "optimizer_class": "<class 'transformers.optimization.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 3928, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
Doquey/DialoGPT-small-Luisbot1
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2022-05-11T06:24:56Z
--- license: mit language: de pipeline_tag: text-generation widget: - text: "In einer schockierenden Entdeckung fanden Wissenschaftler eine Herde Einhörner, die in " example_title: "Einhörner ..." - text: |- Definiere folgende Wörter Wort: Einhorn Definition: Das Einhorn ist ein Fabelwesen von Pferde- oder Ziegengestalt mit einem geraden Horn auf der Stirnmitte. Wort: Regierungschef Definition: Der Regierungschef ist der Leiter der Regierung eines Staates (z. B. National- oder Gliedstaat). Wort: Waffendrill Definition: example_title: "Definiere ..." --- # German GPT2-XL (1.5B) - trained with [BigScience's DeepSpeed-Megatron-LM code base](https://github.com/bigscience-workshop/Megatron-DeepSpeed) - word embedding initialized with [WECHSEL](https://arxiv.org/abs/2112.06598) and all other weights taken from English [gpt2-xl](https://huggingface.co/gpt2-xl) - ~ 3 days on 16xA100 GPUs (~ 80 TFLOPs / GPU) - stopped after 100k steps - 26.2B tokens - less than a single epoch on `oscar_unshuffled_deduplicated_de` (excluding validation set; original model was trained for 75 epochs on less data) - bf16 - zero stage 0 - tp/pp = 1 ### How to use You can use this model directly with a pipeline for text generation. Since the generation relies on some randomness, we set a seed for reproducibility: ```python >>> from transformers import pipeline, set_seed >>> generator = pipeline('text-generation', model='malteos/gpt2-xl-wechsel-german') >>> set_seed(42) >>> generator("Hello, I'm a language model,", max_length=30, num_return_sequences=5) [{'generated_text': "Hello, I'm a language model, a language for thinking, a language for expressing thoughts."}, {'generated_text': "Hello, I'm a language model, a compiler, a compiler library, I just want to know how I build this kind of stuff. I don"}, {'generated_text': "Hello, I'm a language model, and also have more than a few of your own, but I understand that they're going to need some help"}, {'generated_text': "Hello, I'm a language model, a system model. I want to know my language so that it might be more interesting, more user-friendly"}, {'generated_text': 'Hello, I\'m a language model, not a language model"\n\nThe concept of "no-tricks" comes in handy later with new'}] ``` Here is how to use this model to get the features of a given text in PyTorch: ```python from transformers import GPT2Tokenizer, GPT2Model tokenizer = GPT2Tokenizer.from_pretrained('malteos/gpt2-xl-wechsel-german') model = GPT2Model.from_pretrained('malteos/gpt2-xl-wechsel-german') text = "Replace me by any text you'd like." encoded_input = tokenizer(text, return_tensors='pt') output = model(**encoded_input) ``` ## Evaluation | Model (size) | PPL | |---|---| | `gpt2-xl-wechsel-german` (1.5B) | **14.5** | | `gpt2-wechsel-german-ds-meg` (117M) | 26.4 | | `gpt2-wechsel-german` (117M) | 26.8 | | `gpt2` (retrained from scratch) (117M) | 27.63 | ## License MIT
DoyyingFace/bert-COVID-HATE-finetuned-test
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # {MODEL_NAME} This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') model = AutoModel.from_pretrained('{MODEL_NAME}') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 117759 with parameters: ``` {'batch_size': 8, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.SoftmaxLoss.SoftmaxLoss` Parameters of the fit()-Method: ``` { "epochs": 1, "evaluation_steps": 0, "evaluator": "sentence_transformers.evaluation.EmbeddingSimilarityEvaluator.EmbeddingSimilarityEvaluator", "max_grad_norm": 1, "optimizer_class": "<class 'transformers.optimization.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 11775, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
DoyyingFace/bert-asian-hate-tweets-asian-unclean-freeze-12
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
null
--- language: - vi tags: - capitalization - punctuation - token-classification license: cc-by-sa-4.0 datasets: - oscar-corpus/OSCAR-2109 metrics: - accuracy - precision - recall - f1 --- # ✨ vibert-capitalization-punctuation This a [viBERT](https://huggingface.co/FPTAI/vibert-base-cased) model finetuned for punctuation restoration on the [OSCAR-2109](https://huggingface.co/datasets/oscar-corpus/OSCAR-2109) dataset. The model predicts the punctuation and upper-casing of plain, lower-cased text. An example use case can be ASR output. Or other cases when text has lost punctuation. This model is intended for direct use as a punctuation restoration model for the general Vietnamese language. Alternatively, you can use this for further fine-tuning on domain-specific texts for punctuation restoration tasks. Model restores the following punctuations -- **[. , : ? ]** The model also restores the complex upper-casing of words like *YouTube*, *MobiFone*. ----------------------------------------------- ## 🚋 Usage **Below is a quick way to get up and running with the model.** 1. Download files from hub ```python import os import shutil import sys from huggingface_hub import snapshot_download cache_dir = "./capu" def download_files(repo_id, cache_dir=None, ignore_regex=None): download_dir = snapshot_download(repo_id=repo_id, cache_dir=cache_dir, ignore_regex=ignore_regex) if cache_dir is None or download_dir == cache_dir: return download_dir file_names = os.listdir(download_dir) for file_name in file_names: shutil.move(os.path.join(download_dir, file_name), cache_dir) os.rmdir(download_dir) return cache_dir cache_dir = download_files(repo_id="dragonSwing/vibert-capu", cache_dir=cache_dir, ignore_regex=["*.json", "*.bin"]) sys.path.append(cache_dir) ``` 2. Sample python code ```python import os from gec_model import GecBERTModel model = GecBERTModel( vocab_path=os.path.join(cache_dir, "vocabulary"), model_paths="dragonSwing/vibert-capu", split_chunk=True ) model("theo đó thủ tướng dự kiến tiếp bộ trưởng nông nghiệp mỹ tom wilsack bộ trưởng thương mại mỹ gina raimondo bộ trưởng tài chính janet yellen gặp gỡ thượng nghị sĩ patrick leahy và một số nghị sĩ mỹ khác") # Always return list of outputs. # ['Theo đó, Thủ tướng dự kiến tiếp Bộ trưởng Nông nghiệp Mỹ Tom Wilsack, Bộ trưởng Thương mại Mỹ Gina Raimondo, Bộ trưởng Tài chính Janet Yellen, gặp gỡ Thượng nghị sĩ Patrick Leahy và một số nghị sĩ Mỹ khác.'] model("những gói cước năm g mobifone sẽ mang đến cho bạn những trải nghiệm mới lạ trên cả tuyệt vời so với mạng bốn g thì tốc độ truy cập mạng 5 g mobifone được nhận định là siêu đỉnh với mức truy cập nhanh gấp 10 lần") # ['Những gói cước 5G MobiFone sẽ mang đến cho bạn những trải nghiệm mới lạ trên cả tuyệt vời. 
So với mạng 4G thì tốc độ truy cập mạng 5G MobiFone được nhận định là siêu đỉnh với mức truy cập nhanh gấp 10 lần.'] ``` **This model can work on arbitrarily large text in the Vietnamese language.** ----------------------------------------------- ## 📡 Training data Here is the number of text samples we used for fine-tuning the model: | Language | Number of text samples | | --- | --- | | Vietnamese | 5,600,000 | ----------------------------------------------- ## 🎯 Accuracy Below is a breakdown of the performance of the model by each label on 10,000 held-out text samples: | label | precision | recall | f1-score | support | | --- | --- | --- | --- | --- | | **Upper** | 0.88 | 0.89 | 0.89 | 56497 | | **Complex-Upper** | 0.92 | 0.83 | 0.88 | 480 | | **.** | 0.81 | 0.82 | 0.82 | 18139 | | **,** | 0.73 | 0.70 | 0.71 | 22961 | | **:** | 0.74 | 0.56 | 0.64 | 1432 | | **?** | 0.80 | 0.76 | 0.78 | 1730 | | **none** | 0.99 | 0.99 | 0.99 | 475611 | -----------------------------------------------
DoyyingFace/bert-asian-hate-tweets-asian-unclean-freeze-4
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
44
null
--- language: - vi tags: - capitalization - punctuation - token-classification license: cc-by-sa-4.0 datasets: - oscar-corpus/OSCAR-2109 metrics: - accuracy - precision - recall - f1 --- # ✨ xlm-roberta-capitalization-punctuation This a [XLM-RoBERTa](https://huggingface.co/xlm-roberta-base) model finetuned for Vietnamese punctuation restoration on the [OSCAR-2109](https://huggingface.co/datasets/oscar-corpus/OSCAR-2109) dataset. The model predicts the punctuation and upper-casing of plain, lower-cased text. An example use case can be ASR output. Or other cases when text has lost punctuation. This model is intended for direct use as a punctuation restoration model for the general Vietnamese language. Alternatively, you can use this for further fine-tuning on domain-specific texts for punctuation restoration tasks. Model restores the following punctuations -- **[. , : ? ]** The model also restores the complex upper-casing of words like *YouTube*, *MobiFone*. ----------------------------------------------- ## 🚋 Usage **Below is a quick way to get up and running with the model.** 1. Download files from hub ```python import os import shutil import sys from huggingface_hub import snapshot_download cache_dir = "./capu" def download_files(repo_id, cache_dir=None, ignore_regex=None): download_dir = snapshot_download(repo_id=repo_id, cache_dir=cache_dir, ignore_regex=ignore_regex) if cache_dir is None or download_dir == cache_dir: return download_dir file_names = os.listdir(download_dir) for file_name in file_names: shutil.move(os.path.join(download_dir, file_name), cache_dir) os.rmdir(download_dir) return cache_dir cache_dir = download_files(repo_id="dragonSwing/xlm-roberta-capu", cache_dir=cache_dir, ignore_regex=["*.json", "*.bin"]) sys.path.append(cache_dir) ``` 2. Sample python code ```python import os from gec_model import GecBERTModel model = GecBERTModel( vocab_path=os.path.join(cache_dir, "vocabulary"), model_paths="dragonSwing/xlm-roberta-capu", split_chunk=True ) model("theo đó thủ tướng dự kiến tiếp bộ trưởng nông nghiệp mỹ tom wilsack bộ trưởng thương mại mỹ gina raimondo bộ trưởng tài chính janet yellen gặp gỡ thượng nghị sĩ patrick leahy và một số nghị sĩ mỹ khác") # Always return list of outputs. # ['Theo đó, Thủ tướng dự kiến tiếp Bộ trưởng Nông nghiệp Mỹ Tom Wilsack, Bộ trưởng Thương mại Mỹ Gina Raimondo, Bộ trưởng Tài chính Janet Yellen, gặp gỡ Thượng nghị sĩ Patrick Leahy và một số nghị sĩ Mỹ khác.'] model("những gói cước năm g mobifone sẽ mang đến cho bạn những trải nghiệm mới lạ trên cả tuyệt vời so với mạng bốn g thì tốc độ truy cập mạng 5 g mobifone được nhận định là siêu đỉnh với mức truy cập nhanh gấp 10 lần") # ['Những gói cước 5G MobiFone sẽ mang đến cho bạn những trải nghiệm mới lạ trên cả tuyệt vời. 
So với mạng 4G thì tốc độ truy cập mạng 5G MobiFone được Nhận định là siêu đỉnh với mức truy cập nhanh gấp 10 lần.'] ``` **This model can work on arbitrarily large text in the Vietnamese language.** ----------------------------------------------- ## 📡 Training data Here is the number of text samples we used for fine-tuning the model: | Language | Number of text samples | | --- | --- | | Vietnamese | 5,600,000 | ----------------------------------------------- ## 🎯 Accuracy Below is a breakdown of the performance of the model by each label on 10,000 held-out text samples: | label | precision | recall | f1-score | support | | --- | --- | --- | --- | --- | | **Upper** | 0.89 | 0.90 | 0.89 | 56497 | | **Complex-Upper** | 0.93 | 0.83 | 0.88 | 480 | | **.** | 0.81 | 0.84 | 0.82 | 18139 | | **,** | 0.69 | 0.75 | 0.72 | 22961 | | **:** | 0.76 | 0.60 | 0.67 | 1432 | | **?** | 0.82 | 0.75 | 0.78 | 1730 | | **none** | 0.99 | 0.99 | 0.99 | 475611 | -----------------------------------------------
DoyyingFace/bert-asian-hate-tweets-asian-unclean-warmup-25
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
null
--- language: en thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1521957986335297536/itVSA7l0_400x400.jpg&#39;)"> </div> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1446623190252343301/qIJAwo9I_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI CYBORG 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Elon Musk & Kim Kardashian</div> <div style="text-align: center; font-size: 14px;">@elonmusk-kimkardashian</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Elon Musk & Kim Kardashian. | Data | Elon Musk | Kim Kardashian | | --- | --- | --- | | Tweets downloaded | 222 | 3241 | | Retweets | 16 | 715 | | Short tweets | 47 | 667 | | Tweets kept | 159 | 1859 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/17bd0o7t/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @elonmusk-kimkardashian's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/2g9hft2n) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/2g9hft2n/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/elonmusk-kimkardashian') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
DoyyingFace/bert-asian-hate-tweets-asian-unclean-warmup-75
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
37
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 291.52 +/- 22.96 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code