| Column | Type | Range / values |
| --- | --- | --- |
| `modelId` | string | lengths 4–81 |
| `tags` | sequence | — |
| `pipeline_tag` | string | 17 classes |
| `config` | dict | — |
| `downloads` | int64 | 0–59.7M |
| `first_commit` | timestamp[ns, tz=UTC] | — |
| `card` | string | lengths 51–438k |
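Each record below is one model repository: its metadata fields followed by the raw text of its model card. As a minimal sketch of how a dump with this schema could be loaded and queried with the `datasets` library — the dataset ID used here is a hypothetical placeholder, not the actual source of this dump:

```python
from datasets import load_dataset

# Hypothetical dataset ID -- substitute the repository this dump was actually exported from.
ds = load_dataset("username/hub-model-metadata", split="train")

# Columns: modelId, tags, pipeline_tag, config, downloads, first_commit, card
print(ds.column_names)

# Example query: text-classification models with a non-trivial download count.
text_clf = ds.filter(
    lambda row: row["pipeline_tag"] == "text-classification" and row["downloads"] >= 100
)
print(f"{len(text_clf)} text-classification models")

# Inspect one record: metadata plus the first few hundred characters of its model card.
row = text_clf[0]
print(row["modelId"], row["downloads"], row["tags"])
print(row["card"][:400])
```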
D3vil/DialoGPT-smaall-harrypottery
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: bert-finetuned-protagonist-english results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-finetuned-protagonist-english This model is a fine-tuned version of [Jean-Baptiste/roberta-large-ner-english](https://huggingface.co/Jean-Baptiste/roberta-large-ner-english) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0630 - Precision: 0.8646 - Recall: 0.8839 - F1: 0.8742 - Accuracy: 0.9876 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 25 | 0.0659 | 0.8860 | 0.9018 | 0.8938 | 0.9862 | | No log | 2.0 | 50 | 0.0583 | 0.8553 | 0.8705 | 0.8628 | 0.9860 | | No log | 3.0 | 75 | 0.0593 | 0.8728 | 0.8884 | 0.8805 | 0.9876 | | No log | 4.0 | 100 | 0.0622 | 0.8559 | 0.875 | 0.8653 | 0.9871 | | No log | 5.0 | 125 | 0.0630 | 0.8646 | 0.8839 | 0.8742 | 0.9876 | ### Framework versions - Transformers 4.16.2 - Pytorch 1.10.2+cu102 - Datasets 2.2.1 - Tokenizers 0.11.0
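The hyperparameter blocks in auto-generated Trainer cards like the one above map almost directly onto `transformers.TrainingArguments`. The following is an illustrative sketch for this card's listed settings, not the original training script; `output_dir` and anything the card does not list are placeholders left at library defaults.

```python
from transformers import TrainingArguments

# Mirrors the hyperparameters listed in the card above; unlisted settings stay at defaults.
args = TrainingArguments(
    output_dir="bert-finetuned-protagonist-english",  # placeholder
    learning_rate=2e-5,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    seed=42,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    num_train_epochs=5,
)
```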
DSI/personal_sentiment
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
25
null
--- tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: bert-finetuned-protagonist-english-pc results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-finetuned-protagonist-english-pc This model is a fine-tuned version of [Jean-Baptiste/roberta-large-ner-english](https://huggingface.co/Jean-Baptiste/roberta-large-ner-english) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0351 - Precision: 0.9513 - Recall: 0.9598 - F1: 0.9556 - Accuracy: 0.9919 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 100 | 0.0407 | 0.9254 | 0.9420 | 0.9336 | 0.9908 | | No log | 2.0 | 200 | 0.0351 | 0.9513 | 0.9598 | 0.9556 | 0.9919 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.10.1+cu113 - Datasets 2.2.1 - Tokenizers 0.12.1
DTAI-KULeuven/mbert-corona-tweets-belgium-curfew-support
[ "pytorch", "jax", "bert", "text-classification", "multilingual", "nl", "fr", "en", "arxiv:2104.09947", "transformers", "Tweets", "Sentiment analysis" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
2022-05-18T15:31:34Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - metrics: - type: mean_reward value: -143.18 +/- 62.58 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **DQN** Agent playing **LunarLander-v2** This is a trained model of a **DQN** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
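The usage section of the Stable-Baselines3 cards in this dump is left as a TODO. A minimal loading sketch for such a checkpoint, assuming a hypothetical repo ID and filename (the card does not name them), would look like this:

```python
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import DQN
from stable_baselines3.common.evaluation import evaluate_policy

# Hypothetical repo ID and filename -- the card above does not specify the checkpoint file.
checkpoint = load_from_hub(
    repo_id="username/dqn-LunarLander-v2",
    filename="dqn-LunarLander-v2.zip",
)
model = DQN.load(checkpoint)

# Roll out the agent to reproduce a mean-reward estimate like the one reported in the card.
env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```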
alexandrainst/da-emotion-classification-base
[ "pytorch", "tf", "bert", "text-classification", "da", "transformers", "license:cc-by-sa-4.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
837
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: hubert-base-cc-finetuned-forum results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # hubert-base-cc-finetuned-forum This model is a fine-tuned version of [SZTAKI-HLT/hubert-base-cc](https://huggingface.co/SZTAKI-HLT/hubert-base-cc) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.4746 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 2.7966 | 1.0 | 157 | 2.5139 | | 2.6303 | 2.0 | 314 | 2.4601 | | 2.5525 | 3.0 | 471 | 2.4501 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0a0+17540c5 - Datasets 2.2.1 - Tokenizers 0.12.1
alexandrainst/da-subjectivivity-classification-base
[ "pytorch", "tf", "safetensors", "bert", "text-classification", "da", "dataset:DDSC/twitter-sent", "dataset:DDSC/europarl", "transformers", "license:cc-by-sa-4.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
846
null
--- tags: - generated_from_trainer model-index: - name: deep-pavlov-full-2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # deep-pavlov-full-2 This model is a fine-tuned version of [DeepPavlov/rubert-base-cased](https://huggingface.co/DeepPavlov/rubert-base-cased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.0892 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 18 - eval_batch_size: 18 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 1.0425 | 1.0 | 2513 | 1.0277 | | 0.7953 | 2.0 | 5026 | 1.0226 | | 0.5902 | 3.0 | 7539 | 1.0892 | ### Framework versions - Transformers 4.20.0.dev0 - Pytorch 1.11.0+cu113 - Datasets 2.2.2.dev0 - Tokenizers 0.12.1
alexandrainst/da-hatespeech-detection-small
[ "pytorch", "electra", "text-classification", "da", "transformers", "license:cc-by-4.0" ]
text-classification
{ "architectures": [ "ElectraForSequenceClassification" ], "model_type": "electra", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,506
null
--- tags: - spacy - token-classification language: - en model-index: - name: en_sdoh_roberta_cui results: - task: name: NER type: token-classification metrics: - name: NER Precision type: precision value: 0.8016997167 - name: NER Recall type: recall value: 0.7526595745 - name: NER F Score type: f_score value: 0.7764060357 widget: - text: "She lives in Oakland with her bf and commutes to work by bus." example_title: "SDOH NER example 1" - text: "There are some logistical barriers, as she lives in Oakland and works at the VA, commuting by bus" example_title: "SDOH NER example 2" - text: "I have also been very moody/tearful and generally depressed on a daily basis" example_title: "SDOH NER example 3" - text: "Patient is a 85 yo widow, who lives in section 8 housing. She is occasionally visited by her daughter and grandchildren." example_title: "SDOH NER example 4" --- RoBERTA based Social determinants of health NER with a standard ontology extension interface (SNOMED_CT and CUI) | Feature | Description | | --- | --- | | **Name** | `en_sdoh_roberta_cui` | | **Version** | `0.0.0` | | **spaCy** | `>=3.2.1,<3.3.0` | | **Default Pipeline** | `transformer`, `ner`, `sdoh_cui` | | **Components** | `transformer`, `ner`, `sdoh_cui` | | **Vectors** | 0 keys, 0 unique vectors (0 dimensions) | | **Sources** | n/a | | **License** | n/a | | **Author** | [Dima Lituiev](mailto:[email protected]) | | **Reference** | [Automatic Extraction of Social Determinants of Health from Medical Notes of Chronic Lower Back Pain Patients](https://www.medrxiv.org/content/10.1101/2022.03.04.22271541v1) | ### Label Scheme <details> <summary>View label scheme (52 labels for 1 components)</summary> | Component | Labels | | --- | --- | | **`ner`** | `Anxiety: GAD`, `Anxiety: Generalized Anxiety Disorder`, `Anxiety: Level of anxiety`, `Anxiety: NA`, `Anxiety: Signs and symptoms of anxiety`, `Anxiety: family hx: Anxiety state`, `Anxiety: hx of anxiety state`, `Depression: Family hx: Depression`, `Depression: Major depressive disorder`, `Depression: NA`, `Depression: PHQ`, `Depression: Symptoms of depression`, `Depression: hx of Depression`, `Financial_strain: Financial problem`, `Financial_strain: Financially secure`, `Financial_strain: NA`, `Financial_strain: Unable to afford medication`, `Food: Able to obtain food`, `Food: Fruit and vegetable intake`, `Food: NA`, `Housing: Homeless`, `Housing: Housing unsuited to needs`, `Housing: Marginally housed`, `Housing: NA`, `Housing: Stably housed`, `Housing: Subsidized housing`, `Housing: lives in facility`, `Insurance_status: Inadequate healthcare resources`, `Insurance_status: NA`, `Marital_or_partnership_status: Divorced`, `Marital_or_partnership_status: Engaged to be married`, `Marital_or_partnership_status: Married`, `Marital_or_partnership_status: NA`, `Marital_or_partnership_status: Partner`, `Marital_or_partnership_status: Partner relationship problem`, `Marital_or_partnership_status: Separated`, `Marital_or_partnership_status: Single person`, `Marital_or_partnership_status: Widowed`, `Social_isolation: At risk for loneliness`, `Social_isolation: Has social support`, `Social_isolation: Lives alone`, `Social_isolation: Lives with`, `Social_isolation: NA`, `Social_isolation: Personal relationship breakdown`, `Social_isolation: Social Isolation`, `Transportation: Has access to a car`, `Transportation: Has access to public transport vehicle`, `Transportation: NA`, `Transportation: Transportation problems`, `pain_and_disability: NA`, `pain_and_disability: Pain intensity rating scale, 
current`, `pain_and_disability: Pain intensity rating scale, worst` | </details> ### Accuracy | Type | Score | | --- | --- | | `ENTS_F` | 77.64 | | `ENTS_P` | 80.17 | | `ENTS_R` | 75.27 | | `TRANSFORMER_LOSS` | 37708138.10 | | `NER_LOSS` | 1444086.80 |
alexandrainst/da-ned-base
[ "pytorch", "tf", "xlm-roberta", "text-classification", "da", "transformers", "license:cc-by-sa-4.0" ]
text-classification
{ "architectures": [ "XLMRobertaForSequenceClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
25
2022-05-18T17:36:00Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 165.66 +/- 64.55 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Daiki/scibert_scivocab_uncased-finetuned-cola
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-05-18T18:01:43Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 286.33 +/- 8.54 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Dazai/Ok
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 108.15 +/- 153.65 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Declan/Breitbart_modelv7
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
``` from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln45") model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln45") ``` ``` How To Make Prompt: informal english: i am very ready to do that just that. Translated into the Style of Abraham Lincoln: you can assure yourself of my readiness to work toward this end. Translated into the Style of Abraham Lincoln: please be assured that i am most ready to undertake this laborious task. *** informal english: space is huge and needs to be explored. Translated into the Style of Abraham Lincoln: space awaits traversal, a new world whose boundaries are endless. Translated into the Style of Abraham Lincoln: space is a ( limitless / boundless ) expanse, a vast virgin domain awaiting exploration. *** informal english: corn fields are all across illinois, visible once you leave chicago. Translated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago. informal english: ``` ``` infill: chrome extensions [MASK] accomplish everyday tasks. Translated into the Style of Abraham Lincoln: chrome extensions ( expedite the ability to / unlock the means to more readily ) accomplish everyday tasks. infill: at a time when nintendo has become inflexible, [MASK] consoles that are tethered to a fixed iteration, sega diligently curates its legacy of classic video games on handheld devices. Translated into the Style of Abraham Lincoln: at a time when nintendo has become inflexible, ( stubbornly [MASK] on / firmly set on / unyielding in its insistence on ) consoles that are tethered to a fixed iteration, sega diligently curates its legacy of classic video games on handheld devices. infill: ``` ``` Essay Intro (Warriors vs. Rockets in Game 7): text: eagerly anticipated by fans, game 7's are the highlight of the post-season. text: ever-building in suspense, game 7's have the crowd captivated. *** Essay Intro (South Korean TV Is Becoming Popular): text: maturing into a bona fide paragon of programming, south korean television ( has much to offer / entertains without fail / never disappoints ). text: increasingly held in critical esteem, south korean television continues to impress. text: at the forefront of quality content, south korea is quickly achieving celebrity status. *** Essay Intro ( ``` ``` Search: What is the definition of Checks and Balances? https://en.wikipedia.org/wiki/Checks_and_balances Checks and Balances is the idea of having a system where each and every action in government should be subject to one or more checks that would not allow one branch or the other to overly dominate. https://www.harvard.edu/glossary/Checks_and_Balances Checks and Balances is a system that allows each branch of government to limit the powers of the other branches in order to prevent abuse of power https://www.law.cornell.edu/library/constitution/Checks_and_Balances Checks and Balances is a system of separation through which branches of government can control the other, thus preventing excess power. *** Search: What is the definition of Separation of Powers? 
https://en.wikipedia.org/wiki/Separation_of_powers The separation of powers is a principle in government, whereby governmental powers are separated into different branches, each with their own set of powers, that are prevent one branch from aggregating too much power. https://www.yale.edu/tcf/Separation_of_Powers.html Separation of Powers is the division of governmental functions between the executive, legislative and judicial branches, clearly demarcating each branch's authority, in the interest of ensuring that individual liberty or security is not undermined. *** Search: What is the definition of Connection of Powers? https://en.wikipedia.org/wiki/Connection_of_powers Connection of Powers is a feature of some parliamentary forms of government where different branches of government are intermingled, typically the executive and legislative branches. https://simple.wikipedia.org/wiki/Connection_of_powers The term Connection of Powers describes a system of government in which there is overlap between different parts of the government. *** Search: What is the definition of ``` ``` Search: What are phrase synonyms for "second-guess"? https://www.powerthesaurus.org/second-guess/synonyms Shortest to Longest: - feel dubious about - raise an eyebrow at - wrinkle their noses at - cast a jaundiced eye at - teeter on the fence about *** Search: What are phrase synonyms for "mean to newbies"? https://www.powerthesaurus.org/mean_to_newbies/synonyms Shortest to Longest: - readiness to balk at rookies - absence of tolerance for novices - hostile attitude toward newcomers *** Search: What are phrase synonyms for "make use of"? https://www.powerthesaurus.org/make_use_of/synonyms Shortest to Longest: - call upon - glean value from - reap benefits from - derive utility from - seize on the merits of - draw on the strength of - tap into the potential of *** Search: What are phrase synonyms for "hurting itself"? https://www.powerthesaurus.org/hurting_itself/synonyms Shortest to Longest: - erring - slighting itself - forfeiting its integrity - doing itself a disservice - evincing a lack of backbone *** Search: What are phrase synonyms for " ``` ``` - declining viewership facing the nba. - does not have to be this way. - in fact, many solutions exist. - the four point line would surely draw in eyes. text: failing to draw in the masses, the nba has ( fallen into / succumb to / bowed to ) disrepair. such does not have to be the case, however. in fact, a myriad of simple, relatively cheap ( solutions / interventions / enhancements ) could revive the league. the addition of the much-hyped four-point line would surely juice viewership. *** - ``` ``` original: sports teams are profitable for owners. [MASK], their valuations experience a dramatic uptick. infill: sports teams are profitable for owners. ( accumulating vast sums / stockpiling treasure / realizing benefits / cashing in / registering robust financials / scoring on balance sheets ), their valuations experience a dramatic uptick. *** original: ``` ``` wordy: classical music is becoming less popular more and more. Translate into Concise Text: interest in classic music is fading. *** wordy: ``` ``` sweet: savvy voters ousted him. longer: voters who were informed delivered his defeat. *** sweet: ``` ``` 1: commercial space company spacex plans to launch a whopping 52 flights in 2022. 2: spacex, a commercial space company, intends to undertake a total of 52 flights in 2022. 3: in 2022, commercial space company spacex has its sights set on undertaking 52 flights. 
4: 52 flights are in the pipeline for 2022, according to spacex, a commercial space company. 5: a commercial space company, spacex aims to conduct 52 flights in 2022. *** 1: ``` Keywords to sentences or sentence. ``` ngos are characterized by: □ voluntary citizens' group that is organized on a local, national or international level □ encourage political participation □ often serve humanitarian functions □ work for social, economic, or environmental change *** what are the drawbacks of living near an airbnb? □ noise □ parking □ traffic □ security □ strangers *** ``` ``` original: musicals generally use spoken dialogue as well as songs to convey the story. operas are usually fully sung. adapted: musicals generally use spoken dialogue as well as songs to convey the story. ( in a stark departure / on the other hand / in contrast / by comparison / at odds with this practice / far from being alike / in defiance of this standard / running counter to this convention ), operas are usually fully sung. *** original: akoya and tahitian are types of pearls. akoya pearls are mostly white, and tahitian pearls are naturally dark. adapted: akoya and tahitian are types of pearls. ( a far cry from being indistinguishable / easily distinguished / on closer inspection / setting them apart / not to be mistaken for one another / hardly an instance of mere synonymy / differentiating the two ), akoya pearls are mostly white, and tahitian pearls are naturally dark. *** original: ``` ``` original: had trouble deciding. translated into journalism speak: wrestled with the question, agonized over the matter, furrowed their brows in contemplation. *** original: ```
Declan/CNN_model_v1
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- tags: autotrain language: unk widget: - text: "I love AutoTrain 🤗" datasets: - priyamm/autotrain-data-KeywordExtraction co2_eq_emissions: 0.21373468108000182 --- # Model Trained Using AutoTrain - Problem type: Binary Classification - Model ID: 882328335 - CO2 Emissions (in grams): 0.21373468108000182 ## Validation Metrics - Loss: 0.2641160488128662 - Accuracy: 0.9128 - Precision: 0.9444444444444444 - Recall: 0.8772 - AUC: 0.9709556000000001 - F1: 0.9095810866860223 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/priyamm/autotrain-KeywordExtraction-882328335 ``` Or Python API: ``` from transformers import AutoModelForSequenceClassification, AutoTokenizer model = AutoModelForSequenceClassification.from_pretrained("priyamm/autotrain-KeywordExtraction-882328335", use_auth_token=True) tokenizer = AutoTokenizer.from_pretrained("priyamm/autotrain-KeywordExtraction-882328335", use_auth_token=True) inputs = tokenizer("I love AutoTrain", return_tensors="pt") outputs = model(**inputs) ```
Declan/CNN_model_v2
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 164.44 +/- 115.97 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Declan/FoxNews_model_v2
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- language: en tags: grs --- ## Citation Please star the [GRS GitHub repo](https://github.com/imohammad12/GRS) and cite the paper if you found our model useful: ``` @inproceedings{dehghan-etal-2022-grs, title = "{GRS}: Combining Generation and Revision in Unsupervised Sentence Simplification", author = "Dehghan, Mohammad and Kumar, Dhruv and Golab, Lukasz", booktitle = "Findings of the Association for Computational Linguistics: ACL 2022", month = may, year = "2022", address = "Dublin, Ireland", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2022.findings-acl.77", pages = "949--960", abstract = "We propose GRS: an unsupervised approach to sentence simplification that combines text generation and text revision. We start with an iterative framework in which an input sentence is revised using explicit edit operations, and add paraphrasing as a new edit operation. This allows us to combine the advantages of generative and revision-based approaches: paraphrasing captures complex edit operations, and the use of explicit edit operations in an iterative manner provides controllability and interpretability. We demonstrate these advantages of GRS compared to existing methods on the Newsela and ASSET datasets.", } ```
Declan/HuffPost_model_v8
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: apache-2.0 tags: - image-classification - generated_from_trainer datasets: - food101 metrics: - accuracy model-index: - name: vit-base-food101-demo-v5 results: - task: name: Image Classification type: image-classification dataset: name: food101 type: food101 config: default split: train args: default metrics: - name: Accuracy type: accuracy value: 0.8539405940594059 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-food101-demo-v5 This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the food101 dataset. It achieves the following results on the evaluation set: - Loss: 0.5493 - Accuracy: 0.8539 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:| | 1.657 | 1.0 | 4735 | 0.9732 | 0.7459 | | 0.9869 | 2.0 | 9470 | 0.7987 | 0.7884 | | 0.71 | 3.0 | 14205 | 0.6364 | 0.8311 | | 0.4961 | 4.0 | 18940 | 0.5595 | 0.8487 | ### Framework versions - Transformers 4.21.1 - Pytorch 1.12.0+cu113 - Datasets 2.4.0 - Tokenizers 0.12.1
Declan/NewYorkPost_model_v1
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-05-19T03:37:09Z
--- language: en thumbnail: http://www.huggingtweets.com/lightcrypto-sergeynazarov/1652931465147/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/751118197126991873/eSXubsCD_400x400.jpg&#39;)"> </div> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1478019214212747264/LZmNClhs_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI CYBORG 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Sergey Nazarov & light</div> <div style="text-align: center; font-size: 14px;">@lightcrypto-sergeynazarov</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Sergey Nazarov & light. | Data | Sergey Nazarov | light | | --- | --- | --- | | Tweets downloaded | 718 | 3237 | | Retweets | 162 | 367 | | Short tweets | 11 | 405 | | Tweets kept | 545 | 2465 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/pe3nb090/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @lightcrypto-sergeynazarov's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/1am840oh) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/1am840oh/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/lightcrypto-sergeynazarov') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
Declan/NewYorkTimes_model_v3
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - generated_from_trainer model-index: - name: rob2rand_chen results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # rob2rand_chen This model was trained from scratch on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-06 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1000 - num_epochs: 50 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.18.0 - Pytorch 1.7.1 - Datasets 2.1.0 - Tokenizers 0.12.1
Declan/Reuters_model_v1
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - wmt14 model-index: - name: opus-mt-en-de-finetuned-de-to-en results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # opus-mt-en-de-finetuned-de-to-en This model is a fine-tuned version of [Helsinki-NLP/opus-mt-en-de](https://huggingface.co/Helsinki-NLP/opus-mt-en-de) on the wmt14 dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.2 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.1 - Tokenizers 0.12.1
DeepPavlov/marianmt-tatoeba-ruen
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: Boglinger/mt5-small-klex results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # Boglinger/mt5-small-klex This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 4.7908 - Validation Loss: 3.2086 - Epoch: 19 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 5.6e-05, 'decay_steps': 2344, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: mixed_float16 ### Training results | Train Loss | Validation Loss | Epoch | |:----------:|:---------------:|:-----:| | 10.4780 | 4.4037 | 0 | | 6.5474 | 3.8111 | 1 | | 5.7133 | 3.5425 | 2 | | 5.3385 | 3.3847 | 3 | | 5.0827 | 3.3011 | 4 | | 4.9326 | 3.2479 | 5 | | 4.8377 | 3.2185 | 6 | | 4.7692 | 3.2086 | 7 | | 4.7478 | 3.2086 | 8 | | 4.7534 | 3.2086 | 9 | | 4.7665 | 3.2086 | 10 | | 4.7779 | 3.2086 | 11 | | 4.7689 | 3.2086 | 12 | | 4.7838 | 3.2086 | 13 | | 4.7881 | 3.2086 | 14 | | 4.7869 | 3.2086 | 15 | | 4.7612 | 3.2086 | 16 | | 4.7667 | 3.2086 | 17 | | 4.7581 | 3.2086 | 18 | | 4.7908 | 3.2086 | 19 | ### Framework versions - Transformers 4.19.2 - TensorFlow 2.8.0 - Datasets 2.2.1 - Tokenizers 0.12.1
DeepPavlov/roberta-large-winogrande
[ "pytorch", "roberta", "text-classification", "en", "dataset:winogrande", "arxiv:1907.11692", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
348
null
--- language: en --- # UnifiedQA-Reddit-SYAC This is an abstractive title answering (TA) / clickbait spoiling model. This is a variant of [allenai/unifiedqa-t5-large](https://huggingface.co/allenai/unifiedqa-t5-large), fine-tuned on the Reddit SYAC dataset. The model was trained as part of my masters thesis: _Abstractive title answering for clickbait content_ ### Disinformation This model has the proven capability of generating, and hallucinating false information. Any use of a TA system such as this one should be with knowledge of this risk. ## Performance ### Intrinsic The following scores is the result of intrinsic evaluation on the Reddit SYAC test set. We used a max input length of 2048 and truncated the tokens exceeding this limit. | rouge1 | rouge2 | rougeL | bleu | meteor | |:----------|:----------|:----------|:----------|:---------| | **44.58** | **23.89** | **43.45** | 17.46 | 36.22 | ### Qualtiy Using human evaluation, we measured model performance by asking the evaluators to rate the models on a scale from 1 to 5 on how good their generated answer was for a given clickbait article. Mean quality = 4.065 ### Factuality We included a factuality assessment to address the issue of generating false information. Human raters were asked to place each output in the categories "True", "Irrelevant", and "False". | True | Irrelevant | False | |:-------:|:----------:|:--------:| | 85% | 7.5% | 7.5% | ## Cite If you use this model, please cite my master's thesis ``` @mastersthesis{heiervang2022AbstractiveTA title={Abstractive title answering for clickbait content}, author={Markus Sverdvik Heiervang}, publisher={University of Oslo, Department of Informatics}, year={2022} } ```
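The UnifiedQA-Reddit-SYAC card above describes a T5-based title-answering model but gives no usage snippet. Loading such a seq2seq checkpoint follows the standard T5 pattern; the repo ID below is the base model named in the card, standing in for the fine-tuned checkpoint, whose repository ID the card does not give.

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# The card names allenai/unifiedqa-t5-large as the base model; the fine-tuned repo ID is not given.
model_id = "allenai/unifiedqa-t5-large"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

# UnifiedQA-style models expect "question \n context" as input; the card reports truncation at 2048 tokens.
text = "what does the headline promise? \n " + "full clickbait article text goes here..."
inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=2048)
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```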
DeepPavlov/rubert-base-cased
[ "pytorch", "jax", "bert", "feature-extraction", "ru", "arxiv:1905.07213", "transformers", "has_space" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
148,127
null
--- pipeline_tag: text-classification language: - nl tags: - text classification - sentiment analysis - domain adaptation widget: - text: "De NMBS heeft recent de airconditioning in alle treinen vernieuwd." example_title: "POS-NMBS" - text: "De wegenwerken langs de E34 blijven al maanden aanhouden." example_title: "NEG-AWV" - text: "Natuur en Bos is erin geslaagd 100 hectaren bosgebied te beschermen." example_title: "POS-ANB" - text: "Het FWO financiert te weinig excellent onderzoek." example_title: "NEG-FWO" - text: "De Lijn is op zoek naar nieuwe buschauffeurs." example_title: "NEU-De Lijn" --- # RePublic ### Model description RePublic (<u>re</u>putation analyzer for <u>public</u> service organizations) is a Dutch BERT model based on BERTje (De Vries, 2019). The model was designed to predict the sentiment in Dutch-language news article text about public agencies. RePublic was developed by [CLiPS](https://www.uantwerpen.be/en/research-groups/clips/) in collaboration with Prof. Dr. [Jan Boon](https://www.uhasselt.be/nl/wie-is-wie/jan-boon). ### How to use The model can be loaded and used to make predictions as follows: ```python from transformers import pipeline model_path = 'clips/republic' pipe = pipeline(task="text-classification", model=model_path, tokenizer=model_path) text = … # load your text here output = pipe(text) prediction = output[0]['label'] # 0=”neutral”; 1=”positive”; 2=”negative” ``` ### Training data and procedure RePublic was domain-adapted on 91 661 Flemish news articles from three popular Flemish news providers between 2000 and 2020 (“Het Laatste Nieuws”, “Het Nieuwsblad” and “De Morgen”). These articles mention at least one out of a pre-defined list of 24 public service organizations, which contains, a.o., De Lijn (public transport organization), VDAB (Flemish job placement service), and Agentschap Zorg en Gezondheid (healthcare service). The domain adaptation was achieved by performing BERT’s language modeling tasks (masked language modeling & next sentence prediction). The model was then fine-tuned on a sentiment classification task (“positive”, “negative”, “neutral”). The supervised data consisted of 4404 annotated sentences mentioning Flemish public agencies of which 1257 sentences were positive, 1485 sentences were negative and 1662 sentences were neutral. Fine-tuning was performed for 4 epochs using a batch size of 8 and a learning rate of 5e-5. In order to evaluate the model, a 10-fold cross validation experiment was conducted. The results of this experiment can be found below. | **Class** | **Precision (%)** | **Recall (%)** | **F1-score (%)** | |:---:|:---:|:---:|:---:| | _Positive_ | 87.3 | 88.6 | 88.0 | | _Negative_ | 86.4 | 86.5 | 86.5 | | _Neutral_ | 85.3 | 84.2 | 84.7 | | _Macro-averaged_ | 86.3 | 86.4 | 86.4 |
DeepPavlov/xlm-roberta-large-en-ru-mnli
[ "pytorch", "xlm-roberta", "text-classification", "en", "ru", "dataset:glue", "dataset:mnli", "transformers", "xlm-roberta-large", "xlm-roberta-large-en-ru", "xlm-roberta-large-en-ru-mnli", "has_space" ]
text-classification
{ "architectures": [ "XLMRobertaForSequenceClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
227
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: -168.47 +/- 71.64 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
DeltaHub/adapter_t5-3b_cola
[ "pytorch", "transformers" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- tags: - generated_from_trainer model-index: - name: bert-base-uncased-scratch-powo_mgh_pt results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-scratch-powo_mgh_pt This model is a fine-tuned version of [](https://huggingface.co/) on the None dataset. It achieves the following results on the evaluation set: - Loss: 4.5901 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 5 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 8 - total_train_batch_size: 40 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 6.3881 | 3.57 | 200 | 5.2653 | | 4.7294 | 7.14 | 400 | 4.6365 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.1 - Tokenizers 0.12.1
DeltaHub/lora_t5-base_mrpc
[ "pytorch", "transformers" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
2022-05-19T11:03:06Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: test_trainer results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # test_trainer This model is a fine-tuned version of [bert-base-multilingual-cased](https://huggingface.co/bert-base-multilingual-cased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.9646 - Accuracy: 0.8171 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.4452 | 1.0 | 2000 | 0.5505 | 0.7673 | | 0.277 | 2.0 | 4000 | 0.7271 | 0.8210 | | 0.1412 | 3.0 | 6000 | 0.9646 | 0.8171 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.1 - Tokenizers 0.12.1
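The card reports accuracy but not how it was computed. A common, hedged sketch of a `compute_metrics` callback for the `Trainer` (an assumption, not the author's exact code):

```python
# Hedged sketch of an accuracy metric callback for transformers.Trainer.
import numpy as np
from datasets import load_metric

accuracy = load_metric("accuracy")

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=-1)  # predicted class per example
    return accuracy.compute(predictions=predictions, references=labels)
```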
Denilson/gbert-base-germaner
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-05-19T11:18:50Z
--- tags: - conversational --- # mawaidhaChatbot Model
Deniskin/gpt3_medium
[ "pytorch", "gpt2", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
52
null
--- tags: - generated_from_keras_callback model-index: - name: Boglinger/mt5-small-german-finetune-mlsum-klex results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # Boglinger/mt5-small-german-finetune-mlsum-klex This model is a fine-tuned version of [ml6team/mt5-small-german-finetune-mlsum](https://huggingface.co/ml6team/mt5-small-german-finetune-mlsum) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 3.7473 - Validation Loss: 3.3362 - Epoch: 9 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 5.6e-05, 'decay_steps': 744, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: mixed_float16 ### Training results | Train Loss | Validation Loss | Epoch | |:----------:|:---------------:|:-----:| | 4.1761 | 3.5532 | 0 | | 3.9847 | 3.4679 | 1 | | 3.9027 | 3.4180 | 2 | | 3.8483 | 3.3838 | 3 | | 3.8075 | 3.3593 | 4 | | 3.7779 | 3.3476 | 5 | | 3.7570 | 3.3393 | 6 | | 3.7446 | 3.3362 | 7 | | 3.7506 | 3.3362 | 8 | | 3.7473 | 3.3362 | 9 | ### Framework versions - Transformers 4.19.2 - TensorFlow 2.8.0 - Datasets 2.2.1 - Tokenizers 0.12.1
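The optimizer block above (AdamWeightDecay with a linear `PolynomialDecay` schedule and mixed_float16 precision) can, under reasonable assumptions, be reproduced with the `create_optimizer` helper from transformers; the exact call used for this model is not stated in the card, and the warmup setting below is assumed.

```python
# Hedged sketch: recreate the optimizer/schedule configuration listed above.
import tensorflow as tf
from transformers import create_optimizer

tf.keras.mixed_precision.set_global_policy("mixed_float16")  # training_precision above

optimizer, lr_schedule = create_optimizer(
    init_lr=5.6e-5,        # initial_learning_rate from the config above
    num_train_steps=744,   # decay_steps from the config above
    num_warmup_steps=0,    # assumed: no warmup is listed
    weight_decay_rate=0.01,
)
```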
Denny29/DialoGPT-medium-asunayuuki
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- license: bigscience-bloom-rail-1.0 language: - ak - ar - as - bm - bn - ca - code - en - es - eu - fon - fr - gu - hi - id - ig - ki - kn - lg - ln - ml - mr - ne - nso - ny - or - pa - pt - rn - rw - sn - st - sw - ta - te - tn - ts - tum - tw - ur - vi - wo - xh - yo - zh - zhs - zht - zu pipeline_tag: text-generation --- <h1 style='text-align: center '>BLOOM LM</h1> <h2 style='text-align: center '><em>BigScience Large Open-science Open-access Multilingual Language Model</em> </h2> <h3 style='text-align: center '>Model Card</h3> <img src="https://s3.amazonaws.com/moonup/production/uploads/1657124309515-5f17f0a0925b9863e28ad517.png" alt="BigScience Logo" width="800" style="margin-left:'auto' margin-right:'auto' display:'block'"/> Version 1.0 / 26.May.2022 # Model Card for Bloom-560m <!-- Provide a quick summary of what the model is/does. --> ## Table of Contents 1. [Model Details](#model-details) 2. [Uses](#uses) 3. [Bias, Risks, and Limitations](#bias-risks-and-limitations) 4. [Recommendations](#recommendations) 5. [Training Data](#training-data) 6. [Evaluation](#evaluation) 7. [Environmental Impact](#environmental-impact) 8. [Technical Specifications](#techincal-specifications) 9. [Citation](#citation) 10. [Glossary and Calculations](#glossary-and-calculations) 11. [More Information](#more-information) 12. [Model Card Authors](#model-card-authors) 13. [Model Card Contact](#model-card-contact) ## Model Details ### Model Description *This section provides information for anyone who wants to know about the model.* - **Developed by:** BigScience ([website](https://bigscience.huggingface.co)) * All collaborators are either volunteers or have an agreement with their employer. *(Further breakdown of participants forthcoming.)* - **Model Type:** Transformer-based Language Model - **Version:** 1.0.0 - **Languages:** Multiple; see [training data](#training-data) - **License:** RAIL License v1.0 ([link](https://huggingface.co/spaces/bigscience/license)) - **Release Date Estimate:** Monday, 11.July.2022 - **Funded by:** * The French government. * Hugging Face ([website](https://huggingface.co)). * Organizations of contributors. *(Further breakdown of organizations forthcoming.)* ## Uses *This section addresses questions around how the model is intended to be used, discusses the foreseeable users of the model (including those affected by the model), and describes uses that are considered out of scope or misuse of the model. It provides information for anyone considering using the model or who is affected by the model.* ### Intended Use This model is being created in order to enable public research on large language models (LLMs). LLMs are intended to be used for language generation or as a pretrained base model that can be further fine-tuned for specific tasks. Use cases below are not exhaustive. #### **Direct Use** - Text generation - Exploring characteristics of language generated by a language model - Examples: Cloze tests, counterfactuals, generations with reframings #### **Downstream Use** - Tasks that leverage language models include: Information Extraction, Question Answering, Summarization ### Misuse and Out-of-scope Use *This section addresses what users ought not do with the model.* See the [BLOOM License](https://huggingface.co/spaces/bigscience/license), Attachment A, for detailed usage restrictions. The below list is non-exhaustive, but lists some easily foreseeable problematic use cases. 
#### **Out-of-scope Uses** Using the model in [high-stakes](#high-stakes) settings is out of scope for this model.  The model is not designed for [critical decisions](#critical-decisions) nor uses with any material consequences on an individual's livelihood or wellbeing. The model outputs content that appears factual but is not correct. ##### Out-of-scope Uses Include: - Usage in biomedical domains, political and legal domains, or finance domains - Usage for evaluating or scoring individuals, such as for employment, education, or credit - Applying the model for critical automatic decisions, generating factual content, creating reliable summaries, or generating predictions that must be correct #### **Misuse** Intentionally using the model for harm, violating [human rights](#human-rights), or other kinds of malicious activities, is a misuse of this model. This includes: - Spam generation - Disinformation and influence operations - Disparagement and defamation - Harassment and abuse - [Deception](#deception) - Unconsented impersonation and imitation - Unconsented surveillance - Generating content without attribution to the model, as specified in the [RAIL License, Use Restrictions](https://huggingface.co/spaces/bigscience/license) ### Intended Users #### **Direct Users** - General Public - Researchers - Students - Educators - Engineers/developers - Non-commercial entities - Community advocates, including human and civil rights groups #### Indirect Users - Users of derivatives created by Direct Users, such as those using software with an [intended use](#intended-use) - Users of [Derivatives of the Model, as described in the License](https://huggingface.co/spaces/bigscience/license) #### Others Affected (Parties Prenantes) - People and groups referred to by the LLM - People and groups exposed to outputs of, or decisions based on, the LLM - People and groups whose original work is included in the LLM ## Bias, Risks and Limitations *This section identifies foreseeable harms and misunderstandings.* Model may: - Overrepresent some viewpoints and underrepresent others - Contain stereotypes - Contain [personal information](#personal-data-and-information) - Generate: - Hateful, abusive, or violent language - Discriminatory or prejudicial language - Content that may not be appropriate for all settings, including sexual content - Make errors, including producing incorrect information as if it were factual - Generate irrelevant or repetitive outputs ### Recommendations *This section provides information on warnings and potential mitigations.* - Indirect users should be made aware when the content they're working with is created by the LLM. - Users should be aware of [Risks and Limitations](#risks-and-limitations), and include an appropriate age disclaimer or blocking interface as necessary. - Models pretrained with the LLM should include an updated Model Card. - Users of the model should provide mechanisms for those affected to provide feedback, such as an email address for comments. ## Training Data *This section provides a high-level overview of the training data. It is relevant for anyone who wants to know the basics of what the model is learning.* Details for each dataset are provided in individual [Data Cards](https://huggingface.co/spaces/bigscience/BigScienceCorpus). Training data includes: - 45 natural languages - 12 programming languages - In 1.5TB of pre-processed text, converted into 350B unique tokens (see [the tokenizer section](#tokenization) for more.) 
#### **Languages** The pie chart shows the distribution of languages in training data. ![pie chart showing the distribution of languages in training data](https://github.com/bigscience-workshop/model_card/blob/main/assets/data/pie_chart.svg?raw=true) **The following table shows the further distribution of Niger-Congo and Indic languages in the training data.** | Niger Congo | Percentage | | Indic | Percentage | |----------------|------------ |------ |-----------|------------| | Chi Tumbuka | 0.00002 | | Assamese | 0.01 | | Kikuyu | 0.00004 | | Odia | 0.04 | | Bambara | 0.00004 | | Gujarati | 0.04 | | Akan | 0.00007 | | Marathi | 0.05 | | Xitsonga | 0.00007 | | Punjabi | 0.05 | | Sesotho | 0.00007 | | Kannada | 0.06 | | Chi Chewa | 0.0001 | | Nepali | 0.07 | | Setswana | 0.0002 | | Telugu | 0.09 | | Northern Sotho | 0.0002 | | Malayalam | 0.10 | | Fon | 0.0002 | | Urdu | 0.10 | | Kirundi | 0.0003 | | Tamil | 0.20 | | Wolof | 0.0004 | | Bengali | 0.50 | | Kuganda | 0.0004 | | Hindi | 0.70 | | Chi Shona | 0.001 | | Isi Zulu | 0.001 | | Igbo | 0.001 | | Xhosa | 0.001 | | Kinyarwanda | 0.003 | | Yoruba | 0.006 | | Swahili | 0.02 | **The following table shows the distribution of programming languages.** | Extension | Language | Number of files | |----------------|------------|-----------------| | java | Java | 5,407,724 | | php | PHP | 4,942,186 | | cpp | C++ | 2,503,930 | | py | Python | 2,435,072 | | js | JavaScript | 1,905,518 | | cs | C# | 1,577,347 | | rb | Ruby | 6,78,413 | | cc | C++ | 443,054 | | hpp | C++ | 391,048 | | lua | Lua | 352,317 | | go | GO | 227,763 | | ts | TypeScript | 195,254 | | C | C | 134,537 | | scala | Scala | 92,052 | | hh | C++ | 67,161 | | H | C++ | 55,899 | | tsx | TypeScript | 33,107 | | rs | Rust | 29,693 | | phpt | PHP | 9,702 | | c++ | C++ | 1,342 | | h++ | C++ | 791 | | php3 | PHP | 540 | | phps | PHP | 270 | | php5 | PHP | 166 | | php4 | PHP | 29 | ## Evaluation *This section describes the evaluation protocols and provides the results.* ### Metrics *This section describes the different ways performance is calculated and why.* Includes: | Metric | Why chosen | |--------------------|--------------------------------------------------------------------| | [Perplexity](#perplexity) | Standard metric for quantifying model improvements during training | | Cross Entropy [Loss](#loss) | Standard objective for language models. | And multiple different metrics for specific tasks. _(More evaluation metrics forthcoming upon completion of evaluation protocol.)_ ### Factors *This section lists some different aspects of what BLOOM models. Its focus is on those aspects that are likely to give rise to high variance in model behavior.* - Language, such as English or Yoruba - Domain, such as newswire or stories - Demographic characteristics, such as gender or nationality ### Results *Results are based on the [Factors](#factors) and [Metrics](#metrics).* **Train-time Evaluation:** As of 25.May.2022, 15:00 PST: - Training Loss: 2.0 - Validation Loss: 2.2 - Perplexity: 8.9 (More evaluation scores forthcoming at the end of model training.) ## Environmental Impact The training supercomputer, Jean Zay ([website](http://www.idris.fr/eng/jean-zay/jean-zay-presentation-eng.html)), uses mostly nuclear energy. The heat generated by it is reused for heating campus housing. 
**Estimated carbon emissions:** *(Forthcoming upon completion of training.)* **Estimated electricity usage:** *(Forthcoming upon completion of training.)* ## Technical Specifications *This section provides information for people who work on model development.* Please see [the BLOOM training README](https://github.com/bigscience-workshop/bigscience/tree/master/train/tr11-176B-ml#readme) for full details on replicating training. **Model Architecture:** Modified from Megatron-LM GPT2 (see [paper](https://arxiv.org/abs/1909.08053), [BLOOM Megatron code](https://github.com/bigscience-workshop/Megatron-DeepSpeed)): * Decoder-only architecture * Layer normalization applied to word embeddings layer (`StableEmbedding`; see [code](https://github.com/facebookresearch/bitsandbytes), [paper](https://arxiv.org/pdf/2110.02861.pdf)) * ALiBI positional encodings (see [paper](https://arxiv.org/pdf/2108.12409.pdf)), with GeLU activation functions * 559,214,592 parameters: * 256,901,120 embedding parameters * 24 layers, 16 attention heads * Hidden layers are 1024-dimensional * Sequence length of 2048 tokens (see [BLOOM tokenizer](https://huggingface.co/bigscience/tokenizer), [tokenizer description](#tokenization)) **Objective Function:** Cross Entropy with mean reduction (see [API documentation](https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html#torch.nn.CrossEntropyLoss)). **Compute infrastructure:** Jean Zay Public Supercomputer, provided by the French government (see [announcement](https://www.enseignementsup-recherche.gouv.fr/fr/signature-du-marche-d-acquisition-de-l-un-des-supercalculateurs-les-plus-puissants-d-europe-46733)). * Hardware: 384 A100 80GB GPUs (48 nodes): * Additional 32 A100 80GB GPUs (4 nodes) in reserve * 8 GPUs per node Using NVLink 4 inter-gpu connects, 4 OmniPath links * CPU: AMD * CPU memory: 512GB per node * GPU memory: 640GB per node * Inter-node connect: Omni-Path Architecture (OPA) * NCCL-communications network: a fully dedicated subnet * Disc IO network: shared network with other types of nodes * Software: * Megatron-DeepSpeed ([Github link](https://github.com/bigscience-workshop/Megatron-DeepSpeed)) * DeepSpeed ([Github link](https://github.com/microsoft/DeepSpeed)) * PyTorch (pytorch-1.11 w/ CUDA-11.5; see [Github link](https://github.com/pytorch/pytorch)) * apex ([Github link](https://github.com/NVIDIA/apex)) ### **Training** Training logs: [Tensorboard link](https://huggingface.co/bigscience/tr11e-350M-logs) - Training throughput: About 150 TFLOPs per GPU - Number of epochs: 1 (*current target*) - Dates: - Started 11th March, 2022 11:42am PST - Ended 5th July, 2022 - Estimated cost of training: Equivalent of $2-5M in cloud computing (including preliminary experiments and other model sizes) - Server training location: Île-de-France, France ### **Tokenization** The BLOOM tokenizer ([link](https://huggingface.co/bigscience/tokenizer)) is a learned subword tokenizer trained using: - A byte-level Byte Pair Encoding (BPE) algorithm - A simple pre-tokenization rule, no normalization - A vocabulary size of 250,680 It was trained on a subset of a preliminary version of the corpus using alpha-weighting per language. ## Citation **Cite as:** BigScience, _BigScience Language Open-science Open-access Multilingual (BLOOM) Language Model_. 
International, May 2021-May 2022 ## Glossary and Calculations *This section defines common terms and how metrics are calculated.* - <a name="loss">**Loss:**</a> A calculation of the difference between what the model has learned and what the data shows ("groundtruth"). The lower the loss, the better. The training process aims to minimize the loss. - <a name="perplexity">**Perplexity:**</a> This is based on what the model estimates the probability of new data is. The lower the perplexity, the better. If the model is 100% correct at predicting the next token it will see, then the perplexity is 1. Mathematically this is calculated using entropy. - <a name="high-stakes">**High-stakes settings:**</a> Such as those identified as "high-risk AI systems" and "unacceptable risk AI systems" in the European Union's proposed [Artificial Intelligence (AI) Act](https://artificialintelligenceact.eu/annexes/). - <a name="critical-decisions">**Critical decisions:**</a> Such as those defined in [the United States' proposed Algorithmic Accountability Act](https://www.congress.gov/117/bills/s3572/BILLS-117s3572is.pdf). - <a name="human-rights">**Human rights:**</a> Includes those rights defined in the [Universal Declaration of Human Rights](https://www.un.org/sites/un2.un.org/files/2021/03/udhr.pdf). - <a name="personal-data-and-information">**Personal Data and Personal Information:**</a> Personal data and information is defined in multiple data protection regulations, such as "[personal data](https://gdpr-info.eu/issues/personal-data/)" in the [European Union's General Data Protection Regulation](https://gdpr-info.eu); and "personal information" in the Republic of South Africa's [Protection of Personal Information Act](https://www.gov.za/sites/default/files/gcis_document/201409/3706726-11act4of2013popi.pdf), The People's Republic of China's [Personal information protection law](http://en.npc.gov.cn.cdurl.cn/2021-12/29/c_694559.htm). - <a name="sensitive-characteristics">**Sensitive characteristics:**</a> This includes specifically protected categories in human rights (see [UHDR, Article 2](https://www.un.org/sites/un2.un.org/files/2021/03/udhr.pdf)) and personal information regulation (see GDPR, [Article 9; Protection of Personal Information Act, Chapter 1](https://www.gov.za/sites/default/files/gcis_document/201409/3706726-11act4of2013popi.pdf)) - <a name="deception">**Deception:**</a> Doing something to intentionally mislead individuals to believe something that is false, such as by creating deadbots or chatbots on social media posing as real people, or generating text documents without making consumers aware that the text is machine generated. 
## More Information ### Dataset Creation Blog post detailing the design choices during the dataset creation: https://bigscience.huggingface.co/blog/building-a-tb-scale-multilingual-dataset-for-language-modeling ### Technical Specifications Blog post summarizing how the architecture, size, shape, and pre-training duration were selected: https://bigscience.huggingface.co/blog/what-language-model-to-train-if-you-have-two-million-gpu-hours More details on the architecture/optimizer: https://github.com/bigscience-workshop/bigscience/tree/master/train/tr11-176B-ml Blog post on the hardware/engineering side: https://bigscience.huggingface.co/blog/which-hardware-to-train-a-176b-parameters-model Details on the distributed setup used for the training: https://github.com/bigscience-workshop/bigscience/tree/master/train/tr11-176B-ml Tensorboard updated during the training: https://huggingface.co/bigscience/tr11-176B-ml-logs/tensorboard#scalars&tagFilter=loss Insights on how to approach training, negative results: https://github.com/bigscience-workshop/bigscience/blob/master/train/lessons-learned.md Details on the obstacles overcome during the preparation on the engineering side (instabilities, optimization of training throughput, so many technical tricks and questions): https://github.com/bigscience-workshop/bigscience/blob/master/train/tr11-176B-ml/chronicles.md ### Initial Results Initial prompting experiments using interim checkpoints: https://huggingface.co/spaces/bigscience/bloom-book ## Model Card Authors *Ordered roughly chronologically and by amount of time spent.* Margaret Mitchell, Giada Pistilli, Yacine Jernite, Ezinwanne Ozoani, Marissa Gerchick, Nazneen Rajani, Sasha Luccioni, Irene Solaiman, Maraim Masoud, Somaieh Nikpoor, Carlos Muñoz Ferrandis, Stas Bekman, Christopher Akiki, Danish Contractor, David Lansky, Angelina McMillan-Major, Tristan Thrush, Suzana Ilić, Gérard Dupont, Shayne Longpre, Manan Dey, Stella Biderman, Douwe Kiela, Emi Baylor, Teven Le Scao, Aaron Gokaslan, Julien Launay, Niklas Muennighoff ## Model Card Contact **Send Questions to:** [email protected]
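A hedged usage sketch for the direct text-generation use described in this card; the checkpoint name `bigscience/bloom-560m` is an assumption based on the card title, and the generation settings are illustrative only.

```python
# Minimal text-generation sketch (checkpoint name assumed from the card title).
from transformers import pipeline

generator = pipeline("text-generation", model="bigscience/bloom-560m")
output = generator("BigScience is a collaborative", max_length=40, do_sample=True)
print(output[0]["generated_text"])
```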
Denver/distilbert-base-uncased-finetuned-squad
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: bigscience-bloom-rail-1.0 language: - ak - ar - as - bm - bn - ca - code - en - es - eu - fon - fr - gu - hi - id - ig - ki - kn - lg - ln - ml - mr - ne - nso - ny - or - pa - pt - rn - rw - sn - st - sw - ta - te - tn - ts - tum - tw - ur - vi - wo - xh - yo - zh - zhs - zht - zu pipeline_tag: text-generation --- <h1 style='text-align: center '>BLOOM LM</h1> <h2 style='text-align: center '><em>BigScience Large Open-science Open-access Multilingual Language Model</em> </h2> <h3 style='text-align: center '>Model Card</h3> <img src="https://s3.amazonaws.com/moonup/production/uploads/1657124309515-5f17f0a0925b9863e28ad517.png" alt="BigScience Logo" width="800" style="margin-left:'auto' margin-right:'auto' display:'block'"/> Version 1.0 / 26.May.2022 ## Table of Contents 1. [Model Details](#model-details) 2. [Uses](#uses) 3. [Training Data](#training-data) 4. [Risks and Limitations](#risks-and-limitations) 5. [Evaluation](#evaluation) 6. [Recommendations](#recommendations) 7. [Glossary and Calculations](#glossary-and-calculations) 8. [More Information](#more-information) 9. [Model Card Authors](#model-card-authors) ## Model Details ### Basics *This section provides information for anyone who wants to know about the model.* <details> <summary>Click to expand</summary> <br/> **Developed by:** BigScience ([website](https://bigscience.huggingface.co)) * All collaborators are either volunteers or have an agreement with their employer. *(Further breakdown of participants forthcoming.)* **Model Type:** Transformer-based Language Model **Version:** 1.0.0 **Languages:** Multiple; see [training data](#training-data) **License:** RAIL License v1.0 ([link](https://huggingface.co/spaces/bigscience/license)) **Release Date Estimate:** Monday, 11.July.2022 **Send Questions to:** [email protected] **Cite as:** BigScience, _BigScience Language Open-science Open-access Multilingual (BLOOM) Language Model_. International, May 2021-May 2022 **Funded by:** * The French government. * Hugging Face ([website](https://huggingface.co)). * Organizations of contributors. *(Further breakdown of organizations forthcoming.)* </details> ### Technical Specifications *This section provides information for people who work on model development.* <details> <summary>Click to expand</summary><br/> Please see [the BLOOM training README](https://github.com/bigscience-workshop/bigscience/tree/master/train/tr11-176B-ml#readme) for full details on replicating training. **Model Architecture:** Modified from Megatron-LM GPT2 (see [paper](https://arxiv.org/abs/1909.08053), [BLOOM Megatron code](https://github.com/bigscience-workshop/Megatron-DeepSpeed)): * Decoder-only architecture * Layer normalization applied to word embeddings layer (`StableEmbedding`; see [code](https://github.com/facebookresearch/bitsandbytes), [paper](https://arxiv.org/pdf/2110.02861.pdf)) * ALiBI positional encodings (see [paper](https://arxiv.org/pdf/2108.12409.pdf)), with GeLU activation functions * 1,065,314,304 parameters: * 385,351,680 embedding parameters * 24 layers, 16 attention heads * Hidden layers are 1536-dimensional * Sequence length of 2048 tokens used (see [BLOOM tokenizer](https://huggingface.co/bigscience/tokenizer), [tokenizer description](#tokenization)) **Objective Function:** Cross Entropy with mean reduction (see [API documentation](https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html#torch.nn.CrossEntropyLoss)). 
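To illustrate the objective function named above, here is a small, self-contained sketch of token-level cross entropy with mean reduction using toy tensors; this is not the actual training loop, only the loss convention the card points to.

```python
# Hedged illustration of cross entropy with mean reduction over a toy sequence.
import torch

vocab_size, seq_len = 250_680, 8                     # vocab size from the tokenizer section
logits = torch.randn(seq_len, vocab_size)            # stand-in model outputs for one sequence
targets = torch.randint(0, vocab_size, (seq_len,))   # stand-in next-token ids

loss_fn = torch.nn.CrossEntropyLoss(reduction="mean")
loss = loss_fn(logits, targets)                      # mean negative log-likelihood per token
print(loss.item())
```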
**Compute infrastructure:** Jean Zay Public Supercomputer, provided by the French government (see [announcement](https://www.enseignementsup-recherche.gouv.fr/fr/signature-du-marche-d-acquisition-de-l-un-des-supercalculateurs-les-plus-puissants-d-europe-46733)). * Hardware: 384 A100 80GB GPUs (48 nodes): * Additional 32 A100 80GB GPUs (4 nodes) in reserve * 8 GPUs per node Using NVLink 4 inter-gpu connects, 4 OmniPath links * CPU: AMD * CPU memory: 512GB per node * GPU memory: 640GB per node * Inter-node connect: Omni-Path Architecture (OPA) * NCCL-communications network: a fully dedicated subnet * Disc IO network: shared network with other types of nodes * Software: * Megatron-DeepSpeed ([Github link](https://github.com/bigscience-workshop/Megatron-DeepSpeed)) * DeepSpeed ([Github link](https://github.com/microsoft/DeepSpeed)) * PyTorch (pytorch-1.11 w/ CUDA-11.5; see [Github link](https://github.com/pytorch/pytorch)) * apex ([Github link](https://github.com/NVIDIA/apex)) #### **Training** Training logs: [Tensorboard link](https://huggingface.co/tensorboard/bigscience/tr11d-760M-logs) - Number of epochs: 1 - Dates: - Started 11th March, 2022 11:42am PST - Ended 5th July, 2022 - Estimated cost of training: Equivalent of $2-5M in cloud computing (including preliminary experiments and other model sizes) - Server training location: Île-de-France, France #### **Tokenization** The BLOOM tokenizer ([link](https://huggingface.co/bigscience/tokenizer)) is a learned subword tokenizer trained using: - A byte-level Byte Pair Encoding (BPE) algorithm - A simple pre-tokenization rule, no normalization - A vocabulary size of 250,680 It was trained on a subset of a preliminary version of the corpus using alpha-weighting per language. </details> ### Environmental Impact <details> <summary>Click to expand</summary><br/> The training supercomputer, Jean Zay ([website](http://www.idris.fr/eng/jean-zay/jean-zay-presentation-eng.html)), uses mostly nuclear energy. The heat generated by it is reused for heating campus housing. **Estimated carbon emissions:** *(Forthcoming upon completion of training.)* **Estimated electricity usage:** *(Forthcoming upon completion of training.)* </details> <p>&nbsp;</p> ## Uses *This section addresses questions around how the model is intended to be used, discusses the foreseeable users of the model (including those affected by the model), and describes uses that are considered out of scope or misuse of the model. It provides information for anyone considering using the model or who is affected by the model.* <details> <summary>Click to expand</summary><br/> ### Intended Use This model is being created in order to enable public research on large language models (LLMs). LLMs are intended to be used for language generation or as a pretrained base model that can be further fine-tuned for specific tasks. Use cases below are not exhaustive. #### **Direct Use** - Text generation - Exploring characteristics of language generated by a language model - Examples: Cloze tests, counterfactuals, generations with reframings #### **Downstream Use** - Tasks that leverage language models include: Information Extraction, Question Answering, Summarization ### Misuse and Out-of-scope Use *This section addresses what users ought not do with the model.* See the [BLOOM License](https://huggingface.co/spaces/bigscience/license), Attachment A, for detailed usage restrictions. The below list is non-exhaustive, but lists some easily foreseeable problematic use cases. 
#### **Out-of-scope Uses** Using the model in [high-stakes](#high-stakes) settings is out of scope for this model.  The model is not designed for [critical decisions](#critical-decisions) nor uses with any material consequences on an individual's livelihood or wellbeing. The model outputs content that appears factual but is not correct. ##### Out-of-scope Uses Include: - Usage in biomedical domains, political and legal domains, or finance domains - Usage for evaluating or scoring individuals, such as for employment, education, or credit - Applying the model for critical automatic decisions, generating factual content, creating reliable summaries, or generating predictions that must be correct #### **Misuse** Intentionally using the model for harm, violating [human rights](#human-rights), or other kinds of malicious activities, is a misuse of this model. This includes: - Spam generation - Disinformation and influence operations - Disparagement and defamation - Harassment and abuse - [Deception](#deception) - Unconsented impersonation and imitation - Unconsented surveillance - Generating content without attribution to the model, as specified in the [RAIL License, Use Restrictions](https://huggingface.co/spaces/bigscience/license) ### Intended Users #### **Direct Users** - General Public - Researchers - Students - Educators - Engineers/developers - Non-commercial entities - Community advocates, including human and civil rights groups #### Indirect Users - Users of derivatives created by Direct Users, such as those using software with an [intended use](#intended-use) - Users of [Derivatives of the Model, as described in the License](https://huggingface.co/spaces/bigscience/license) #### Others Affected (Parties Prenantes) - People and groups referred to by the LLM - People and groups exposed to outputs of, or decisions based on, the LLM - People and groups whose original work is included in the LLM </details> <p>&nbsp;</p> ## Training Data *This section provides a high-level overview of the training data. It is relevant for anyone who wants to know the basics of what the model is learning.* <details> <summary>Click to expand</summary><br/> Details for each dataset are provided in individual [Data Cards](https://huggingface.co/spaces/bigscience/BigScienceCorpus). Training data includes: - 45 natural languages - 12 programming languages - In 1.5TB of pre-processed text, converted into 350B unique tokens (see [the tokenizer section](#tokenization) for more.) #### **Languages** The pie chart shows the distribution of languages in training data. ![pie chart showing the distribution of languages in training data](https://github.com/bigscience-workshop/model_card/blob/main/assets/data/pie_chart.svg?raw=true) The following table shows the further distribution of Niger-Congo and Indic languages in the training data. 
<details> <summary>Click to expand</summary><br/> | Niger Congo | Percentage | | Indic | Percentage | |----------------|------------ |------ |-----------|------------| | Chi Tumbuka | 0.00002 | | Assamese | 0.01 | | Kikuyu | 0.00004 | | Odia | 0.04 | | Bambara | 0.00004 | | Gujarati | 0.04 | | Akan | 0.00007 | | Marathi | 0.05 | | Xitsonga | 0.00007 | | Punjabi | 0.05 | | Sesotho | 0.00007 | | Kannada | 0.06 | | Chi Chewa | 0.0001 | | Nepali | 0.07 | | Setswana | 0.0002 | | Telugu | 0.09 | | Northern Sotho | 0.0002 | | Malayalam | 0.10 | | Fon | 0.0002 | | Urdu | 0.10 | | Kirundi | 0.0003 | | Tamil | 0.20 | | Wolof | 0.0004 | | Bengali | 0.50 | | Kuganda | 0.0004 | | Hindi | 0.70 | | Chi Shona | 0.001 | | Isi Zulu | 0.001 | | Igbo | 0.001 | | Xhosa | 0.001 | | Kinyarwanda | 0.003 | | Yoruba | 0.006 | | Swahili | 0.02 | </details> The following table shows the distribution of programming languages. <details> <summary>Click to expand</summary><br/> | Extension | Language | Number of files | |----------------|------------|-----------------| | java | Java | 5,407,724 | | php | PHP | 4,942,186 | | cpp | C++ | 2,503,930 | | py | Python | 2,435,072 | | js | JavaScript | 1,905,518 | | cs | C# | 1,577,347 | | rb | Ruby | 6,78,413 | | cc | C++ | 443,054 | | hpp | C++ | 391,048 | | lua | Lua | 352,317 | | go | GO | 227,763 | | ts | TypeScript | 195,254 | | C | C | 134,537 | | scala | Scala | 92,052 | | hh | C++ | 67,161 | | H | C++ | 55,899 | | tsx | TypeScript | 33,107 | | rs | Rust | 29,693 | | phpt | PHP | 9,702 | | c++ | C++ | 1,342 | | h++ | C++ | 791 | | php3 | PHP | 540 | | phps | PHP | 270 | | php5 | PHP | 166 | | php4 | PHP | 29 | </details> </details> <p>&nbsp;</p> ## Risks and Limitations *This section identifies foreseeable harms and misunderstandings.* <details> <summary>Click to expand</summary><br/> Model may: - Overrepresent some viewpoints and underrepresent others - Contain stereotypes - Contain [personal information](#personal-data-and-information) - Generate: - Hateful, abusive, or violent language - Discriminatory or prejudicial language - Content that may not be appropriate for all settings, including sexual content - Make errors, including producing incorrect information as if it were factual - Generate irrelevant or repetitive outputs </details> <p>&nbsp;</p> ## Evaluation *This section describes the evaluation protocols and provides the results.* <details> <summary>Click to expand</summary><br/> ### Metrics *This section describes the different ways performance is calculated and why.* Includes: | Metric | Why chosen | |--------------------|--------------------------------------------------------------------| | [Perplexity](#perplexity) | Standard metric for quantifying model improvements during training | | Cross Entropy [Loss](#loss) | Standard objective for language models. | And multiple different metrics for specific tasks. _(More evaluation metrics forthcoming upon completion of evaluation protocol.)_ ### Factors *This section lists some different aspects of BLOOM models. Its focus is on those aspects that are likely to give rise to high variance in model behavior.* - Language, such as English or Yoruba - Domain, such as newswire or stories - Demographic characteristics, such as gender or nationality ### Results *Results are based on the [Factors](#factors) and [Metrics](#metrics).* **Train-time Evaluation:** As of 25.May.2022, 15:00 PST: - Training Loss: 2.7 - Validation Loss: 3.1 - Perplexity: 21.9 (More evaluation scores forthcoming at the end of model training.) 
</details> <p>&nbsp;</p> ## Recommendations *This section provides information on warnings and potential mitigations.* <details> <summary>Click to expand</summary><br/> - Indirect users should be made aware when the content they're working with is created by the LLM. - Users should be aware of [Risks and Limitations](#risks-and-limitations), and include an appropriate age disclaimer or blocking interface as necessary. - Models pretrained with the LLM should include an updated Model Card. - Users of the model should provide mechanisms for those affected to provide feedback, such as an email address for comments. </details> <p>&nbsp;</p> ## Glossary and Calculations *This section defines common terms and how metrics are calculated.* <details> <summary>Click to expand</summary><br/> - <a name="loss">**Loss:**</a> A calculation of the difference between what the model has learned and what the data shows ("groundtruth"). The lower the loss, the better. The training process aims to minimize the loss. - <a name="perplexity">**Perplexity:**</a> This is based on what the model estimates the probability of new data is. The lower the perplexity, the better. If the model is 100% correct at predicting the next token it will see, then the perplexity is 1. Mathematically this is calculated using entropy. - <a name="high-stakes">**High-stakes settings:**</a> Such as those identified as "high-risk AI systems" and "unacceptable risk AI systems" in the European Union's proposed [Artificial Intelligence (AI) Act](https://artificialintelligenceact.eu/annexes/). - <a name="critical-decisions">**Critical decisions:**</a> Such as those defined in [the United States' proposed Algorithmic Accountability Act](https://www.congress.gov/117/bills/s3572/BILLS-117s3572is.pdf). - <a name="human-rights">**Human rights:**</a> Includes those rights defined in the [Universal Declaration of Human Rights](https://www.un.org/sites/un2.un.org/files/2021/03/udhr.pdf). - <a name="personal-data-and-information">**Personal Data and Personal Information:**</a> Personal data and information is defined in multiple data protection regulations, such as "[personal data](https://gdpr-info.eu/issues/personal-data/)" in the [European Union's General Data Protection Regulation](https://gdpr-info.eu); and "personal information" in the Republic of South Africa's [Protection of Personal Information Act](https://www.gov.za/sites/default/files/gcis_document/201409/3706726-11act4of2013popi.pdf), The People's Republic of China's [Personal information protection law](http://en.npc.gov.cn.cdurl.cn/2021-12/29/c_694559.htm). - <a name="sensitive-characteristics">**Sensitive characteristics:**</a> This includes specifically protected categories in human rights (see [UHDR, Article 2](https://www.un.org/sites/un2.un.org/files/2021/03/udhr.pdf)) and personal information regulation (see GDPR, [Article 9; Protection of Personal Information Act, Chapter 1](https://www.gov.za/sites/default/files/gcis_document/201409/3706726-11act4of2013popi.pdf)) - <a name="deception">**Deception:**</a> Doing something to intentionally mislead individuals to believe something that is false, such as by creating deadbots or chatbots on social media posing as real people, or generating text documents without making consumers aware that the text is machine generated. 
</details> <p>&nbsp;</p> ## More Information <details> <summary>Click to expand</summary><br/> ### Dataset Creation Blog post detailing the design choices during the dataset creation: https://bigscience.huggingface.co/blog/building-a-tb-scale-multilingual-dataset-for-language-modeling ### Technical Specifications Blog post summarizing how the architecture, size, shape, and pre-training duration were selected: https://bigscience.huggingface.co/blog/what-language-model-to-train-if-you-have-two-million-gpu-hours More details on the architecture/optimizer: https://github.com/bigscience-workshop/bigscience/tree/master/train/tr11-176B-ml Blog post on the hardware/engineering side: https://bigscience.huggingface.co/blog/which-hardware-to-train-a-176b-parameters-model Details on the distributed setup used for the training: https://github.com/bigscience-workshop/bigscience/tree/master/train/tr11-176B-ml Tensorboard updated during the training: https://huggingface.co/bigscience/tr11-176B-ml-logs/tensorboard#scalars&tagFilter=loss Insights on how to approach training, negative results: https://github.com/bigscience-workshop/bigscience/blob/master/train/lessons-learned.md Details on the obstacles overcome during the preparation on the engineering side (instabilities, optimization of training throughput, so many technical tricks and questions): https://github.com/bigscience-workshop/bigscience/blob/master/train/tr11-176B-ml/chronicles.md ### Initial Results Initial prompting experiments using interim checkpoints: https://huggingface.co/spaces/bigscience/bloom-book </details> <p>&nbsp;</p> ## Model Card Authors *Ordered roughly chronologically and by amount of time spent.* Margaret Mitchell, Giada Pistilli, Yacine Jernite, Ezinwanne Ozoani, Marissa Gerchick, Nazneen Rajani, Sasha Luccioni, Irene Solaiman, Maraim Masoud, Somaieh Nikpoor, Carlos Muñoz Ferrandis, Stas Bekman, Christopher Akiki, Danish Contractor, David Lansky, Angelina McMillan-Major, Tristan Thrush, Suzana Ilić, Gérard Dupont, Shayne Longpre, Manan Dey, Stella Biderman, Douwe Kiela, Emi Baylor, Teven Le Scao, Aaron Gokaslan, Julien Launay, Niklas Muennighoff
DeskDown/MarianMixFT_en-fil
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: bigscience-bloom-rail-1.0 language: - ak - ar - as - bm - bn - ca - code - en - es - eu - fon - fr - gu - hi - id - ig - ki - kn - lg - ln - ml - mr - ne - nso - ny - or - pa - pt - rn - rw - sn - st - sw - ta - te - tn - ts - tum - tw - ur - vi - wo - xh - yo - zh - zhs - zht - zu pipeline_tag: text-generation --- <h1 style='text-align: center '>BLOOM LM</h1> <h2 style='text-align: center '><em>BigScience Large Open-science Open-access Multilingual Language Model</em> </h2> <h3 style='text-align: center '>Model Card</h3> <img src="https://s3.amazonaws.com/moonup/production/uploads/1657124309515-5f17f0a0925b9863e28ad517.png" alt="BigScience Logo" width="800" style="margin-left:'auto' margin-right:'auto' display:'block'"/> Version 1.0 / 26.May.2022 # Model Card for Bloom-1b7 <!-- Provide a quick summary of what the model is/does. --> ## Table of Contents 1. [Model Details](#model-details) 2. [Uses](#uses) 3. [Bias, Risks, and Limitations](#bias-risks-and-limitations) 4. [Recommendations](#recommendations) 5. [Training Data](#training-data) 6. [Evaluation](#evaluation) 7. [Environmental Impact](#environmental-impact) 8. [Technical Specifications](#techincal-specifications) 9. [Citation](#citation) 10. [Glossary and Calculations](#glossary-and-calculations) 11. [More Information](#more-information) 12. [Model Card Authors](#model-card-authors) 13. [Model Card Contact](#model-card-contact) ## Model Details ### Model Description *This section provides information for anyone who wants to know about the model.* - **Developed by:** BigScience ([website](https://bigscience.huggingface.co)) * All collaborators are either volunteers or have an agreement with their employer. *(Further breakdown of participants forthcoming.)* - **Model Type:** Transformer-based Language Model - **Version:** 1.0.0 - **Languages:** Multiple; see [training data](#training-data) - **License:** RAIL License v1.0 ([link](https://huggingface.co/spaces/bigscience/license)) - **Release Date Estimate:** Monday, 11.July.2022 - **Funded by:** * The French government. * Hugging Face ([website](https://huggingface.co)). * Organizations of contributors. *(Further breakdown of organizations forthcoming.)* ## Uses *This section addresses questions around how the model is intended to be used, discusses the foreseeable users of the model (including those affected by the model), and describes uses that are considered out of scope or misuse of the model. It provides information for anyone considering using the model or who is affected by the model.* ### Intended Use This model is being created in order to enable public research on large language models (LLMs). LLMs are intended to be used for language generation or as a pretrained base model that can be further fine-tuned for specific tasks. Use cases below are not exhaustive. #### **Direct Use** - Text generation - Exploring characteristics of language generated by a language model - Examples: Cloze tests, counterfactuals, generations with reframings #### **Downstream Use** - Tasks that leverage language models include: Information Extraction, Question Answering, Summarization ### Misuse and Out-of-scope Use *This section addresses what users ought not do with the model.* See the [BLOOM License](https://huggingface.co/spaces/bigscience/license), Attachment A, for detailed usage restrictions. The below list is non-exhaustive, but lists some easily foreseeable problematic use cases. 
#### **Out-of-scope Uses** Using the model in [high-stakes](#high-stakes) settings is out of scope for this model.  The model is not designed for [critical decisions](#critical-decisions) nor uses with any material consequences on an individual's livelihood or wellbeing. The model outputs content that appears factual but is not correct. ##### Out-of-scope Uses Include: - Usage in biomedical domains, political and legal domains, or finance domains - Usage for evaluating or scoring individuals, such as for employment, education, or credit - Applying the model for critical automatic decisions, generating factual content, creating reliable summaries, or generating predictions that must be correct #### **Misuse** Intentionally using the model for harm, violating [human rights](#human-rights), or other kinds of malicious activities, is a misuse of this model. This includes: - Spam generation - Disinformation and influence operations - Disparagement and defamation - Harassment and abuse - [Deception](#deception) - Unconsented impersonation and imitation - Unconsented surveillance - Generating content without attribution to the model, as specified in the [RAIL License, Use Restrictions](https://huggingface.co/spaces/bigscience/license) ### Intended Users #### **Direct Users** - General Public - Researchers - Students - Educators - Engineers/developers - Non-commercial entities - Community advocates, including human and civil rights groups #### Indirect Users - Users of derivatives created by Direct Users, such as those using software with an [intended use](#intended-use) - Users of [Derivatives of the Model, as described in the License](https://huggingface.co/spaces/bigscience/license) #### Others Affected (Parties Prenantes) - People and groups referred to by the LLM - People and groups exposed to outputs of, or decisions based on, the LLM - People and groups whose original work is included in the LLM ## Bias, Risks, and Limitations *This section identifies foreseeable harms and misunderstandings.* Model may: - Overrepresent some viewpoints and underrepresent others - Contain stereotypes - Contain [personal information](#personal-data-and-information) - Generate: - Hateful, abusive, or violent language - Discriminatory or prejudicial language - Content that may not be appropriate for all settings, including sexual content - Make errors, including producing incorrect information as if it were factual - Generate irrelevant or repetitive outputs ### Recommendations *This section provides information on warnings and potential mitigations.* - Indirect users should be made aware when the content they're working with is created by the LLM. - Users should be aware of [Risks and Limitations](#risks-and-limitations), and include an appropriate age disclaimer or blocking interface as necessary. - Models pretrained with the LLM should include an updated Model Card. - Users of the model should provide mechanisms for those affected to provide feedback, such as an email address for comments. ## Training Data *This section provides a high-level overview of the training data. It is relevant for anyone who wants to know the basics of what the model is learning.* Details for each dataset are provided in individual [Data Cards](https://huggingface.co/spaces/bigscience/BigScienceCorpus). Training data includes: - 45 natural languages - 12 programming languages - In 1.5TB of pre-processed text, converted into 350B unique tokens (see [the tokenizer section](#tokenization) for more.) 
#### **Languages** The pie chart shows the distribution of languages in training data. ![pie chart showing the distribution of languages in training data](https://github.com/bigscience-workshop/model_card/blob/main/assets/data/pie_chart.svg?raw=true) **The following table shows the further distribution of Niger-Congo and Indic languages in the training data.** | Niger Congo | Percentage | | Indic | Percentage | |----------------|------------ |------ |-----------|------------| | Chi Tumbuka | 0.00002 | | Assamese | 0.01 | | Kikuyu | 0.00004 | | Odia | 0.04 | | Bambara | 0.00004 | | Gujarati | 0.04 | | Akan | 0.00007 | | Marathi | 0.05 | | Xitsonga | 0.00007 | | Punjabi | 0.05 | | Sesotho | 0.00007 | | Kannada | 0.06 | | Chi Chewa | 0.0001 | | Nepali | 0.07 | | Setswana | 0.0002 | | Telugu | 0.09 | | Northern Sotho | 0.0002 | | Malayalam | 0.10 | | Fon | 0.0002 | | Urdu | 0.10 | | Kirundi | 0.0003 | | Tamil | 0.20 | | Wolof | 0.0004 | | Bengali | 0.50 | | Kuganda | 0.0004 | | Hindi | 0.70 | | Chi Shona | 0.001 | | Isi Zulu | 0.001 | | Igbo | 0.001 | | Xhosa | 0.001 | | Kinyarwanda | 0.003 | | Yoruba | 0.006 | | Swahili | 0.02 | </details> **The following table shows the distribution of programming languages.** | Extension | Language | Number of files | |----------------|------------|-----------------| | java | Java | 5,407,724 | | php | PHP | 4,942,186 | | cpp | C++ | 2,503,930 | | py | Python | 2,435,072 | | js | JavaScript | 1,905,518 | | cs | C# | 1,577,347 | | rb | Ruby | 6,78,413 | | cc | C++ | 443,054 | | hpp | C++ | 391,048 | | lua | Lua | 352,317 | | go | GO | 227,763 | | ts | TypeScript | 195,254 | | C | C | 134,537 | | scala | Scala | 92,052 | | hh | C++ | 67,161 | | H | C++ | 55,899 | | tsx | TypeScript | 33,107 | | rs | Rust | 29,693 | | phpt | PHP | 9,702 | | c++ | C++ | 1,342 | | h++ | C++ | 791 | | php3 | PHP | 540 | | phps | PHP | 270 | | php5 | PHP | 166 | | php4 | PHP | 29 | ## Evaluation *This section describes the evaluation protocols and provides the results.* ### Metrics *This section describes the different ways performance is calculated and why.* Includes: | Metric | Why chosen | |--------------------|--------------------------------------------------------------------| | [Perplexity](#perplexity) | Standard metric for quantifying model improvements during training | | Cross Entropy [Loss](#loss) | Standard objective for language models. | And multiple different metrics for specific tasks. _(More evaluation metrics forthcoming upon completion of evaluation protocol.)_ ### Factors *This section lists some different aspects of what BLOOM models. Its focus is on those aspects that are likely to give rise to high variance in model behavior.* - Language, such as English or Yoruba - Domain, such as newswire or stories - Demographic characteristics, such as gender or nationality ### Results *Results are based on the [Factors](#factors) and [Metrics](#metrics).* **Train-time Evaluation:** As of 25.May.2022, 15:00 PST: - Training Loss: 2.0 - Validation Loss: 2.2 - Perplexity: 8.9 (More evaluation scores forthcoming at the end of model training.) - [BLOOM Book](https://huggingface.co/spaces/bigscience/bloom-book): Read generations from BLOOM based on prompts provided by the community ## Environmental Impact The training supercomputer, Jean Zay ([website](http://www.idris.fr/eng/jean-zay/jean-zay-presentation-eng.html)), uses mostly nuclear energy. The heat generated by it is reused for heating campus housing. 
**Estimated carbon emissions:** *(Forthcoming upon completion of training.)* **Estimated electricity usage:** *(Forthcoming upon completion of training.)* ## Technical Specifications *This section provides information for people who work on model development.* Please see [the BLOOM training README](https://github.com/bigscience-workshop/bigscience/tree/master/train/tr11-176B-ml#readme) for full details on replicating training. **Model Architecture:** Modified from Megatron-LM GPT2 (see [paper](https://arxiv.org/abs/1909.08053), [BLOOM Megatron code](https://github.com/bigscience-workshop/Megatron-DeepSpeed)): * Decoder-only architecture * Layer normalization applied to word embeddings layer (`StableEmbedding`; see [code](https://github.com/facebookresearch/bitsandbytes), [paper](https://arxiv.org/pdf/2110.02861.pdf)) * ALiBI positional encodings (see [paper](https://arxiv.org/pdf/2108.12409.pdf)), with GeLU activation functions * 1,722,408,960 parameters: * 513,802,240 embedding parameters * 24 layers, 16 attention heads * Hidden layers are 2048-dimensional * Sequence length of 2048 tokens used (see [BLOOM tokenizer](https://huggingface.co/bigscience/tokenizer), [tokenizer description](#tokenization)) **Objective Function:** Cross Entropy with mean reduction (see [API documentation](https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html#torch.nn.CrossEntropyLoss)). **Compute infrastructure:** Jean Zay Public Supercomputer, provided by the French government (see [announcement](https://www.enseignementsup-recherche.gouv.fr/fr/signature-du-marche-d-acquisition-de-l-un-des-supercalculateurs-les-plus-puissants-d-europe-46733)). * Hardware: 64 V100 16/32GB GPUs (16 nodes): * 4 GPUs per node * 40 CPUs per task * 1 task per node * CPU: AMD * CPU memory: 160GB per node * GPU memory: 64GB or 128GB (depending on node availability during training) per node * Inter-node connect: Omni-Path Architecture (OPA) * NCCL-communications network: a fully dedicated subnet * Disc IO network: shared network with other types of nodes * Software: * Megatron-DeepSpeed ([Github link](https://github.com/bigscience-workshop/Megatron-DeepSpeed)) * DeepSpeed ([Github link](https://github.com/microsoft/DeepSpeed)) * PyTorch (pytorch-1.11 w/ CUDA-11.5; see [Github link](https://github.com/pytorch/pytorch)) * apex ([Github link](https://github.com/NVIDIA/apex)) ### **Training** - Checkpoint size: - Fp16 weights: 2.6GB (# params * 2) - Full checkpoint with optimizer states: -- - Training throughput: -- - Number of epochs: 1 - Dates: - Start: 11th March, 2022 11:42am PST - End: 20 May, 2022 - Server training location: Île-de-France, France ### **Tokenization** The BLOOM tokenizer ([link](https://huggingface.co/bigscience/tokenizer)) is a learned subword tokenizer trained using: - A byte-level Byte Pair Encoding (BPE) algorithm - A simple pre-tokenization rule, no normalization - A vocabulary size of 250,680 It was trained on a subset of a preliminary version of the corpus using alpha-weighting per language. ## Citation **Cite as:** BigScience, _BigScience Language Open-science Open-access Multilingual (BLOOM) Language Model_. International, May 2021-May 2022 ## Glossary and Calculations *This section defines common terms and how metrics are calculated.* - <a name="loss">**Loss:**</a> A calculation of the difference between what the model has learned and what the data shows ("groundtruth"). The lower the loss, the better. The training process aims to minimize the loss. 
- <a name="perplexity">**Perplexity:**</a> This is based on what the model estimates the probability of new data is. The lower the perplexity, the better. If the model is 100% correct at predicting the next token it will see, then the perplexity is 1. Mathematically this is calculated using entropy. - <a name="high-stakes">**High-stakes settings:**</a> Such as those identified as "high-risk AI systems" and "unacceptable risk AI systems" in the European Union's proposed [Artificial Intelligence (AI) Act](https://artificialintelligenceact.eu/annexes/). - <a name="critical-decisions">**Critical decisions:**</a> Such as those defined in [the United States' proposed Algorithmic Accountability Act](https://www.congress.gov/117/bills/s3572/BILLS-117s3572is.pdf). - <a name="human-rights">**Human rights:**</a> Includes those rights defined in the [Universal Declaration of Human Rights](https://www.un.org/sites/un2.un.org/files/2021/03/udhr.pdf). - <a name="personal-data-and-information">**Personal Data and Personal Information:**</a> Personal data and information is defined in multiple data protection regulations, such as "[personal data](https://gdpr-info.eu/issues/personal-data/)" in the [European Union's General Data Protection Regulation](https://gdpr-info.eu); and "personal information" in the Republic of South Africa's [Protection of Personal Information Act](https://www.gov.za/sites/default/files/gcis_document/201409/3706726-11act4of2013popi.pdf), The People's Republic of China's [Personal information protection law](http://en.npc.gov.cn.cdurl.cn/2021-12/29/c_694559.htm). - <a name="sensitive-characteristics">**Sensitive characteristics:**</a> This includes specifically protected categories in human rights (see [UHDR, Article 2](https://www.un.org/sites/un2.un.org/files/2021/03/udhr.pdf)) and personal information regulation (see GDPR, [Article 9; Protection of Personal Information Act, Chapter 1](https://www.gov.za/sites/default/files/gcis_document/201409/3706726-11act4of2013popi.pdf)) - <a name="deception">**Deception:**</a> Doing something to intentionally mislead individuals to believe something that is false, such as by creating deadbots or chatbots on social media posing as real people, or generating text documents without making consumers aware that the text is machine generated. 
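*Illustrative calculation (not taken from the training code): perplexity is conventionally computed as the exponential of the per-token cross-entropy loss in nats; the sketch below assumes that convention, and is consistent with the train-time figures reported in the Results section (validation loss 2.2, perplexity ≈ 8.9, up to rounding).*

```python
import math

def perplexity_from_loss(cross_entropy_nats: float) -> float:
    """Perplexity as the exponential of the per-token cross-entropy (in nats)."""
    return math.exp(cross_entropy_nats)

# Validation loss of 2.2 reported in the Results section:
print(round(perplexity_from_loss(2.2), 1))  # ~9.0, in line with the reported 8.9
```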
## More Information

### Dataset Creation

Blog post detailing the design choices during the dataset creation: https://bigscience.huggingface.co/blog/building-a-tb-scale-multilingual-dataset-for-language-modeling

### Technical Specifications

Blog post summarizing how the architecture, size, shape, and pre-training duration were selected: https://bigscience.huggingface.co/blog/what-language-model-to-train-if-you-have-two-million-gpu-hours

More details on the architecture/optimizer: https://github.com/bigscience-workshop/bigscience/tree/master/train/tr11-176B-ml

Blog post on the hardware/engineering side: https://bigscience.huggingface.co/blog/which-hardware-to-train-a-176b-parameters-model

Details on the distributed setup used for the training: https://github.com/bigscience-workshop/bigscience/tree/master/train/tr11-176B-ml

Tensorboard updated during the training: https://huggingface.co/bigscience/tr11-176B-ml-logs/tensorboard#scalars&tagFilter=loss

Insights on how to approach training, negative results: https://github.com/bigscience-workshop/bigscience/blob/master/train/lessons-learned.md

Details on the obstacles overcome during the preparation on the engineering side (instabilities, optimization of training throughput, many technical tricks and questions): https://github.com/bigscience-workshop/bigscience/blob/master/train/tr11-176B-ml/chronicles.md

### Initial Results

Initial prompting experiments using interim checkpoints: https://huggingface.co/spaces/bigscience/bloom-book

## Model Card Authors

*Ordered roughly chronologically and by amount of time spent.*

Margaret Mitchell, Giada Pistilli, Yacine Jernite, Ezinwanne Ozoani, Marissa Gerchick, Nazneen Rajani, Sasha Luccioni, Irene Solaiman, Maraim Masoud, Somaieh Nikpoor, Carlos Muñoz Ferrandis, Stas Bekman, Christopher Akiki, Danish Contractor, David Lansky, Angelina McMillan-Major, Tristan Thrush, Suzana Ilić, Gérard Dupont, Shayne Longpre, Manan Dey, Stella Biderman, Douwe Kiela, Emi Baylor, Teven Le Scao, Aaron Gokaslan, Julien Launay, Niklas Muennighoff

## Model Card Contact

**Send Questions to:** [email protected]
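*Illustrative sketch (not part of the original card): the Tokenization section above links the BLOOM tokenizer repository; assuming that repository can be loaded directly with `AutoTokenizer`, the byte-level BPE tokenizer can be inspected as follows.*

```python
from transformers import AutoTokenizer

# Assumption: the tokenizer repository linked in the Tokenization section
# ("bigscience/tokenizer") is directly loadable with AutoTokenizer.
tokenizer = AutoTokenizer.from_pretrained("bigscience/tokenizer")

print(tokenizer.vocab_size)  # expected to be on the order of the stated 250,680
print(tokenizer.tokenize("BLOOM is a multilingual language model."))
```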
DeskDown/MarianMixFT_en-hi
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
2022-05-19T11:52:27Z
--- license: bigscience-bloom-rail-1.0 language: - ak - ar - as - bm - bn - ca - code - en - es - eu - fon - fr - gu - hi - id - ig - ki - kn - lg - ln - ml - mr - ne - nso - ny - or - pa - pt - rn - rw - sn - st - sw - ta - te - tn - ts - tum - tw - ur - vi - wo - xh - yo - zh - zhs - zht - zu pipeline_tag: text-generation model-index: - name: bloom results: - task: type: text-generation name: text generation dataset: name: arc_challenge type: arc_challenge metrics: - name: acc type: acc value: 0.27986348122866894 verified: false - task: type: text-generation name: text generation dataset: name: arc_easy type: arc_easy metrics: - name: acc type: acc value: 0.5946969696969697 verified: false - task: type: text-generation name: text generation dataset: name: axb type: axb metrics: - name: acc type: acc value: 0.4433876811594203 verified: false - task: type: text-generation name: text generation dataset: name: axg type: axg metrics: - name: acc type: acc value: 0.5 verified: false - task: type: text-generation name: text generation dataset: name: boolq type: boolq metrics: - name: acc type: acc value: 0.6165137614678899 verified: false - task: type: text-generation name: text generation dataset: name: cb type: cb metrics: - name: acc type: acc value: 0.30357142857142855 verified: false - task: type: text-generation name: text generation dataset: name: cola type: cola metrics: - name: acc type: acc value: 0.610738255033557 verified: false - task: type: text-generation name: text generation dataset: name: copa type: copa metrics: - name: acc type: acc value: 0.63 verified: false - task: type: text-generation name: text generation dataset: name: crows_pairs_english type: crows_pairs_english metrics: - name: acc type: acc value: 0.4973166368515206 verified: false - task: type: text-generation name: text generation dataset: name: crows_pairs_french type: crows_pairs_french metrics: - name: acc type: acc value: 0.5032796660703638 verified: false - task: type: text-generation name: text generation dataset: name: diabla type: diabla metrics: - name: acc type: acc value: 0.28888308977035493 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_afr type: gsarti/flores_101_afr metrics: - name: byte_perplexity type: byte_perplexity value: 6.500798737976343 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_amh type: gsarti/flores_101_amh metrics: - name: byte_perplexity type: byte_perplexity value: 3.9726863338897145 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_ara type: gsarti/flores_101_ara metrics: - name: byte_perplexity type: byte_perplexity value: 1.8083841089875814 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_asm type: gsarti/flores_101_asm metrics: - name: byte_perplexity type: byte_perplexity value: 5.699102962086425 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_ast type: gsarti/flores_101_ast metrics: - name: byte_perplexity type: byte_perplexity value: 3.9252047073429384 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_azj type: gsarti/flores_101_azj metrics: - name: byte_perplexity type: byte_perplexity value: 6.942805054270002 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_bel type: gsarti/flores_101_bel metrics: - name: 
byte_perplexity type: byte_perplexity value: 3.614136245847082 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_ben type: gsarti/flores_101_ben metrics: - name: byte_perplexity type: byte_perplexity value: 5.121491534300969 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_bos type: gsarti/flores_101_bos metrics: - name: byte_perplexity type: byte_perplexity value: 5.653353469118798 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_bul type: gsarti/flores_101_bul metrics: - name: byte_perplexity type: byte_perplexity value: 2.7014693938055068 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_cat type: gsarti/flores_101_cat metrics: - name: byte_perplexity type: byte_perplexity value: 2.305190041967345 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_ceb type: gsarti/flores_101_ceb metrics: - name: byte_perplexity type: byte_perplexity value: 6.291000321323428 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_ces type: gsarti/flores_101_ces metrics: - name: byte_perplexity type: byte_perplexity value: 5.447322753586386 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_ckb type: gsarti/flores_101_ckb metrics: - name: byte_perplexity type: byte_perplexity value: 3.7255124939234765 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_cym type: gsarti/flores_101_cym metrics: - name: byte_perplexity type: byte_perplexity value: 12.539424151448149 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_dan type: gsarti/flores_101_dan metrics: - name: byte_perplexity type: byte_perplexity value: 5.183309001005672 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_deu type: gsarti/flores_101_deu metrics: - name: byte_perplexity type: byte_perplexity value: 3.1180422286591347 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_ell type: gsarti/flores_101_ell metrics: - name: byte_perplexity type: byte_perplexity value: 2.467943456164706 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_eng type: gsarti/flores_101_eng metrics: - name: byte_perplexity type: byte_perplexity value: 2.018740628193298 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_est type: gsarti/flores_101_est metrics: - name: byte_perplexity type: byte_perplexity value: 9.11654425176368 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_fas type: gsarti/flores_101_fas metrics: - name: byte_perplexity type: byte_perplexity value: 3.058009097116482 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_fin type: gsarti/flores_101_fin metrics: - name: byte_perplexity type: byte_perplexity value: 6.847047959628553 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_fra type: gsarti/flores_101_fra metrics: - name: byte_perplexity type: byte_perplexity value: 1.9975177011840075 verified: false - task: type: text-generation name: text generation dataset: 
name: gsarti/flores_101_ful type: gsarti/flores_101_ful metrics: - name: byte_perplexity type: byte_perplexity value: 11.465912731488828 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_gle type: gsarti/flores_101_gle metrics: - name: byte_perplexity type: byte_perplexity value: 8.681491663539422 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_glg type: gsarti/flores_101_glg metrics: - name: byte_perplexity type: byte_perplexity value: 3.029991089015508 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_guj type: gsarti/flores_101_guj metrics: - name: byte_perplexity type: byte_perplexity value: 4.955224230286231 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_hau type: gsarti/flores_101_hau metrics: - name: byte_perplexity type: byte_perplexity value: 10.758347356372159 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_heb type: gsarti/flores_101_heb metrics: - name: byte_perplexity type: byte_perplexity value: 3.6004478129801667 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_hin type: gsarti/flores_101_hin metrics: - name: byte_perplexity type: byte_perplexity value: 4.712530650588064 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_hrv type: gsarti/flores_101_hrv metrics: - name: byte_perplexity type: byte_perplexity value: 5.822418943372185 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_hun type: gsarti/flores_101_hun metrics: - name: byte_perplexity type: byte_perplexity value: 6.440482646965992 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_hye type: gsarti/flores_101_hye metrics: - name: byte_perplexity type: byte_perplexity value: 3.657718918347166 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_ibo type: gsarti/flores_101_ibo metrics: - name: byte_perplexity type: byte_perplexity value: 5.564814003872672 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_ind type: gsarti/flores_101_ind metrics: - name: byte_perplexity type: byte_perplexity value: 2.1597101468869373 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_isl type: gsarti/flores_101_isl metrics: - name: byte_perplexity type: byte_perplexity value: 8.082349269518136 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_ita type: gsarti/flores_101_ita metrics: - name: byte_perplexity type: byte_perplexity value: 2.9687591414176207 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_jav type: gsarti/flores_101_jav metrics: - name: byte_perplexity type: byte_perplexity value: 7.0573805415708994 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_jpn type: gsarti/flores_101_jpn metrics: - name: byte_perplexity type: byte_perplexity value: 2.7758864197116933 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_kam type: gsarti/flores_101_kam metrics: - name: byte_perplexity type: byte_perplexity value: 11.072949642861332 
verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_kan type: gsarti/flores_101_kan metrics: - name: byte_perplexity type: byte_perplexity value: 5.551730651007082 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_kat type: gsarti/flores_101_kat metrics: - name: byte_perplexity type: byte_perplexity value: 2.522630524283745 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_kaz type: gsarti/flores_101_kaz metrics: - name: byte_perplexity type: byte_perplexity value: 3.3901748516975574 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_kea type: gsarti/flores_101_kea metrics: - name: byte_perplexity type: byte_perplexity value: 8.918534182590863 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_kir type: gsarti/flores_101_kir metrics: - name: byte_perplexity type: byte_perplexity value: 3.729278369847201 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_kor type: gsarti/flores_101_kor metrics: - name: byte_perplexity type: byte_perplexity value: 3.932884847226212 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_lao type: gsarti/flores_101_lao metrics: - name: byte_perplexity type: byte_perplexity value: 2.9077314760849924 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_lav type: gsarti/flores_101_lav metrics: - name: byte_perplexity type: byte_perplexity value: 7.777221919194806 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_lin type: gsarti/flores_101_lin metrics: - name: byte_perplexity type: byte_perplexity value: 7.524842908050988 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_lit type: gsarti/flores_101_lit metrics: - name: byte_perplexity type: byte_perplexity value: 7.369179434621725 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_ltz type: gsarti/flores_101_ltz metrics: - name: byte_perplexity type: byte_perplexity value: 8.801059747949214 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_lug type: gsarti/flores_101_lug metrics: - name: byte_perplexity type: byte_perplexity value: 8.483203026364786 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_luo type: gsarti/flores_101_luo metrics: - name: byte_perplexity type: byte_perplexity value: 11.975963093623681 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_mal type: gsarti/flores_101_mal metrics: - name: byte_perplexity type: byte_perplexity value: 4.615948455160037 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_mar type: gsarti/flores_101_mar metrics: - name: byte_perplexity type: byte_perplexity value: 5.483253482821379 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_mkd type: gsarti/flores_101_mkd metrics: - name: byte_perplexity type: byte_perplexity value: 2.9656732291754087 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_mlt type: gsarti/flores_101_mlt metrics: 
- name: byte_perplexity type: byte_perplexity value: 15.004773437665275 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_mon type: gsarti/flores_101_mon metrics: - name: byte_perplexity type: byte_perplexity value: 3.410598542315402 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_mri type: gsarti/flores_101_mri metrics: - name: byte_perplexity type: byte_perplexity value: 7.474035895661322 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_msa type: gsarti/flores_101_msa metrics: - name: byte_perplexity type: byte_perplexity value: 2.5710001772665634 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_mya type: gsarti/flores_101_mya metrics: - name: byte_perplexity type: byte_perplexity value: 2.413577969878331 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_nld type: gsarti/flores_101_nld metrics: - name: byte_perplexity type: byte_perplexity value: 4.127831721885065 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_nob type: gsarti/flores_101_nob metrics: - name: byte_perplexity type: byte_perplexity value: 5.402763169129877 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_npi type: gsarti/flores_101_npi metrics: - name: byte_perplexity type: byte_perplexity value: 5.199342701937889 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_nso type: gsarti/flores_101_nso metrics: - name: byte_perplexity type: byte_perplexity value: 8.154626800955667 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_nya type: gsarti/flores_101_nya metrics: - name: byte_perplexity type: byte_perplexity value: 8.179860208369393 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_oci type: gsarti/flores_101_oci metrics: - name: byte_perplexity type: byte_perplexity value: 4.8617357393685845 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_orm type: gsarti/flores_101_orm metrics: - name: byte_perplexity type: byte_perplexity value: 12.911595421079408 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_ory type: gsarti/flores_101_ory metrics: - name: byte_perplexity type: byte_perplexity value: 5.189421861225964 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_pan type: gsarti/flores_101_pan metrics: - name: byte_perplexity type: byte_perplexity value: 4.698477289331806 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_pol type: gsarti/flores_101_pol metrics: - name: byte_perplexity type: byte_perplexity value: 4.625550458479643 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_por type: gsarti/flores_101_por metrics: - name: byte_perplexity type: byte_perplexity value: 1.9754515986213523 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_pus type: gsarti/flores_101_pus metrics: - name: byte_perplexity type: byte_perplexity value: 4.4963371422771585 verified: false - task: type: text-generation name: text generation 
dataset: name: gsarti/flores_101_ron type: gsarti/flores_101_ron metrics: - name: byte_perplexity type: byte_perplexity value: 4.965456830031304 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_rus type: gsarti/flores_101_rus metrics: - name: byte_perplexity type: byte_perplexity value: 2.0498020542445303 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_slk type: gsarti/flores_101_slk metrics: - name: byte_perplexity type: byte_perplexity value: 6.450822127057479 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_slv type: gsarti/flores_101_slv metrics: - name: byte_perplexity type: byte_perplexity value: 6.620252120186232 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_sna type: gsarti/flores_101_sna metrics: - name: byte_perplexity type: byte_perplexity value: 8.462166771382726 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_snd type: gsarti/flores_101_snd metrics: - name: byte_perplexity type: byte_perplexity value: 5.466066951221973 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_som type: gsarti/flores_101_som metrics: - name: byte_perplexity type: byte_perplexity value: 11.95918054093392 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_spa type: gsarti/flores_101_spa metrics: - name: byte_perplexity type: byte_perplexity value: 1.8965140104323535 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_srp type: gsarti/flores_101_srp metrics: - name: byte_perplexity type: byte_perplexity value: 2.871214785885079 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_swe type: gsarti/flores_101_swe metrics: - name: byte_perplexity type: byte_perplexity value: 5.054972008155866 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_swh type: gsarti/flores_101_swh metrics: - name: byte_perplexity type: byte_perplexity value: 3.6973091886730676 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_tam type: gsarti/flores_101_tam metrics: - name: byte_perplexity type: byte_perplexity value: 4.539493400469833 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_tel type: gsarti/flores_101_tel metrics: - name: byte_perplexity type: byte_perplexity value: 5.807499987508966 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_tgk type: gsarti/flores_101_tgk metrics: - name: byte_perplexity type: byte_perplexity value: 3.5994818827380426 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_tgl type: gsarti/flores_101_tgl metrics: - name: byte_perplexity type: byte_perplexity value: 5.667053833119858 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_tha type: gsarti/flores_101_tha metrics: - name: byte_perplexity type: byte_perplexity value: 2.365940201944242 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_tur type: gsarti/flores_101_tur metrics: - name: byte_perplexity type: byte_perplexity value: 
4.885014749844601 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_ukr type: gsarti/flores_101_ukr metrics: - name: byte_perplexity type: byte_perplexity value: 2.7240934990288483 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_umb type: gsarti/flores_101_umb metrics: - name: byte_perplexity type: byte_perplexity value: 12.766915508610673 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_urd type: gsarti/flores_101_urd metrics: - name: byte_perplexity type: byte_perplexity value: 1.9797467071381232 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_uzb type: gsarti/flores_101_uzb metrics: - name: byte_perplexity type: byte_perplexity value: 12.002337637722146 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_vie type: gsarti/flores_101_vie metrics: - name: byte_perplexity type: byte_perplexity value: 1.76578415476397 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_wol type: gsarti/flores_101_wol metrics: - name: byte_perplexity type: byte_perplexity value: 9.144285650306488 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_xho type: gsarti/flores_101_xho metrics: - name: byte_perplexity type: byte_perplexity value: 7.403240538286952 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_yor type: gsarti/flores_101_yor metrics: - name: byte_perplexity type: byte_perplexity value: 5.91272037551173 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_zho_simpl type: gsarti/flores_101_zho_simpl metrics: - name: byte_perplexity type: byte_perplexity value: 2.2769070822768533 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_zho_trad type: gsarti/flores_101_zho_trad metrics: - name: byte_perplexity type: byte_perplexity value: 2.5180582198242383 verified: false - task: type: text-generation name: text generation dataset: name: gsarti/flores_101_zul type: gsarti/flores_101_zul metrics: - name: byte_perplexity type: byte_perplexity value: 8.53353320693145 verified: false - task: type: text-generation name: text generation dataset: name: headqa type: headqa metrics: - name: acc type: acc value: 0.26440554339897887 verified: false - task: type: text-generation name: text generation dataset: name: hellaswag type: hellaswag metrics: - name: acc type: acc value: 0.41236805417247563 verified: false - task: type: text-generation name: text generation dataset: name: logiqa type: logiqa metrics: - name: acc type: acc value: 0.2073732718894009 verified: false - task: type: text-generation name: text generation dataset: name: mathqa type: mathqa metrics: - name: acc type: acc value: 0.24958123953098826 verified: false - task: type: text-generation name: text generation dataset: name: mc_taco type: mc_taco metrics: - name: em type: em value: 0.11936936936936937 verified: false - task: type: text-generation name: text generation dataset: name: mnli type: mnli metrics: - name: acc type: acc value: 0.35496688741721855 verified: false - task: type: text-generation name: text generation dataset: name: mnli_mismatched type: mnli_mismatched metrics: - name: acc type: acc value: 0.35211554109031734 verified: false - 
task: type: text-generation name: text generation dataset: name: mrpc type: mrpc metrics: - name: acc type: acc value: 0.5857843137254902 verified: false - task: type: text-generation name: text generation dataset: name: multirc type: multirc metrics: - name: acc type: acc value: 0.5375412541254125 verified: false - task: type: text-generation name: text generation dataset: name: openbookqa type: openbookqa metrics: - name: acc type: acc value: 0.216 verified: false - task: type: text-generation name: text generation dataset: name: piqa type: piqa metrics: - name: acc type: acc value: 0.7078346028291621 verified: false - task: type: text-generation name: text generation dataset: name: prost type: prost metrics: - name: acc type: acc value: 0.22683603757472245 verified: false - task: type: text-generation name: text generation dataset: name: pubmedqa type: pubmedqa metrics: - name: acc type: acc value: 0.616 verified: false - task: type: text-generation name: text generation dataset: name: qnli type: qnli metrics: - name: acc type: acc value: 0.5072304594545122 verified: false - task: type: text-generation name: text generation dataset: name: qqp type: qqp metrics: - name: acc type: acc value: 0.3842443729903537 verified: false - task: type: text-generation name: text generation dataset: name: race type: race metrics: - name: acc type: acc value: 0.3521531100478469 verified: false - task: type: text-generation name: text generation dataset: name: rte type: rte metrics: - name: acc type: acc value: 0.47653429602888087 verified: false - task: type: text-generation name: text generation dataset: name: sciq type: sciq metrics: - name: acc type: acc value: 0.892 verified: false - task: type: text-generation name: text generation dataset: name: sst type: sst metrics: - name: acc type: acc value: 0.5177752293577982 verified: false - task: type: text-generation name: text generation dataset: name: triviaqa type: triviaqa metrics: - name: acc type: acc value: 0.041633518960487934 verified: false - task: type: text-generation name: text generation dataset: name: tydiqa_primary type: tydiqa_primary metrics: - name: acc type: acc value: 0.3011337608795236 verified: false - task: type: text-generation name: text generation dataset: name: webqs type: webqs metrics: - name: acc type: acc value: 0.01673228346456693 verified: false - task: type: text-generation name: text generation dataset: name: wic type: wic metrics: - name: acc type: acc value: 0.5015673981191222 verified: false - task: type: text-generation name: text generation dataset: name: winogrande type: winogrande metrics: - name: acc type: acc value: 0.5864246250986582 verified: false - task: type: text-generation name: text generation dataset: name: wnli type: wnli metrics: - name: acc type: acc value: 0.471830985915493 verified: false - task: type: text-generation name: text generation dataset: name: wsc type: wsc metrics: - name: acc type: acc value: 0.4423076923076923 verified: false - task: type: text-generation name: text generation dataset: name: humaneval type: humaneval metrics: - name: pass@1 type: pass@1 value: 0.15524390243902436 verified: false - name: pass@10 type: pass@10 value: 0.3220367632383857 verified: false - name: pass@100 type: pass@100 value: 0.5545431515723145 verified: false --- <h1 style='text-align: center '>BLOOM LM</h1> <h2 style='text-align: center '><em>BigScience Large Open-science Open-access Multilingual Language Model</em> </h2> <h3 style='text-align: center '>Model Card</h3> <img 
src="https://s3.amazonaws.com/moonup/production/uploads/1657124309515-5f17f0a0925b9863e28ad517.png" alt="BigScience Logo" width="800" style="margin-left:'auto' margin-right:'auto' display:'block'"/> Version 1.0 / 26.May.2022 ## Table of Contents 1. [Model Details](#model-details) 2. [Uses](#uses) 3. [Training Data](#training-data) 4. [Risks and Limitations](#risks-and-limitations) 5. [Evaluation](#evaluation) 6. [Recommendations](#recommendations) 7. [Glossary and Calculations](#glossary-and-calculations) 8. [More Information](#more-information) 9. [Model Card Authors](#model-card-authors) ## Model Details ### Basics *This section provides information for anyone who wants to know about the model.* <details> <summary>Click to expand</summary> <br/> **Developed by:** BigScience ([website](https://bigscience.huggingface.co)) * All collaborators are either volunteers or have an agreement with their employer. *(Further breakdown of participants forthcoming.)* **Model Type:** Transformer-based Language Model **Version:** 1.0.0 **Languages:** Multiple; see [training data](#training-data) **License:** RAIL License v1.0 ([link](https://huggingface.co/spaces/bigscience/license)) **Release Date Estimate:** Monday, 11.July.2022 **Send Questions to:** [email protected] **Cite as:** BigScience, _BigScience Language Open-science Open-access Multilingual (BLOOM) Language Model_. International, May 2021-May 2022 **Funded by:** * The French government. * Hugging Face ([website](https://huggingface.co)). * Organizations of contributors. *(Further breakdown of organizations forthcoming.)* </details> ### Technical Specifications *This section provides information for people who work on model development.* <details> <summary>Click to expand</summary><br/> Please see [the BLOOM training README](https://github.com/bigscience-workshop/bigscience/tree/master/train/tr11-176B-ml#readme) for full details on replicating training. **Model Architecture:** Modified from Megatron-LM GPT2 (see [paper](https://arxiv.org/abs/1909.08053), [BLOOM Megatron code](https://github.com/bigscience-workshop/Megatron-DeepSpeed)): * Decoder-only architecture * Layer normalization applied to word embeddings layer (`StableEmbedding`; see [code](https://github.com/facebookresearch/bitsandbytes), [paper](https://arxiv.org/pdf/2110.02861.pdf)) * ALiBI positional encodings (see [paper](https://arxiv.org/pdf/2108.12409.pdf)), with GeLU activation functions * 3,002,557,440 parameters: * 642,252,800 embedding parameters * 30 layers, 32 attention heads * Hidden layers are 2560-dimensional * Sequence length of 2048 tokens used (see [BLOOM tokenizer](https://huggingface.co/bigscience/tokenizer), [tokenizer description](#tokenization)) **Objective Function:** Cross Entropy with mean reduction (see [API documentation](https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html#torch.nn.CrossEntropyLoss)). **Compute infrastructure:** Jean Zay Public Supercomputer, provided by the French government (see [announcement](https://www.enseignementsup-recherche.gouv.fr/fr/signature-du-marche-d-acquisition-de-l-un-des-supercalculateurs-les-plus-puissants-d-europe-46733)). 
* Hardware: 384 A100 80GB GPUs (48 nodes): * Additional 32 A100 80GB GPUs (4 nodes) in reserve * 8 GPUs per node Using NVLink 4 inter-gpu connects, 4 OmniPath links * CPU: AMD * CPU memory: 512GB per node * GPU memory: 640GB per node * Inter-node connect: Omni-Path Architecture (OPA) * NCCL-communications network: a fully dedicated subnet * Disc IO network: shared network with other types of nodes * Software: * Megatron-DeepSpeed ([Github link](https://github.com/bigscience-workshop/Megatron-DeepSpeed)) * DeepSpeed ([Github link](https://github.com/microsoft/DeepSpeed)) * PyTorch (pytorch-1.11 w/ CUDA-11.5; see [Github link](https://github.com/pytorch/pytorch)) * apex ([Github link](https://github.com/NVIDIA/apex)) #### **Training** Training logs: [Tensorboard link](https://huggingface.co/tensorboard/bigscience/tr11c-2B5-logs) - Number of epochs: 1 (*current target*) - Dates: - Started 11th March, 2022 11:42am PST - Ended 5th July, 2022 - Estimated cost of training: Equivalent of $2-5M in cloud computing (including preliminary experiments) - Server training location: Île-de-France, France #### **Tokenization** The BLOOM tokenizer ([link](https://huggingface.co/bigscience/tokenizer)) is a learned subword tokenizer trained using: - A byte-level Byte Pair Encoding (BPE) algorithm - A simple pre-tokenization rule, no normalization - A vocabulary size of 250,680 It was trained on a subset of a preliminary version of the corpus using alpha-weighting per language. </details> ### Environmental Impact <details> <summary>Click to expand</summary><br/> The training supercomputer, Jean Zay ([website](http://www.idris.fr/eng/jean-zay/jean-zay-presentation-eng.html)), uses mostly nuclear energy. The heat generated by it is reused for heating campus housing. **Estimated carbon emissions:** *(Forthcoming upon completion of training.)* **Estimated electricity usage:** *(Forthcoming upon completion of training.)* </details> <p>&nbsp;</p> ## Uses *This section addresses questions around how the model is intended to be used, discusses the foreseeable users of the model (including those affected by the model), and describes uses that are considered out of scope or misuse of the model. It provides information for anyone considering using the model or who is affected by the model.* <details> <summary>Click to expand</summary><br/> ### Intended Use This model is being created in order to enable public research on large language models (LLMs). LLMs are intended to be used for language generation or as a pretrained base model that can be further fine-tuned for specific tasks. Use cases below are not exhaustive. #### **Direct Use** - Text generation - Exploring characteristics of language generated by a language model - Examples: Cloze tests, counterfactuals, generations with reframings #### **Downstream Use** - Tasks that leverage language models include: Information Extraction, Question Answering, Summarization ### Misuse and Out-of-scope Use *This section addresses what users ought not do with the model.* See the [BLOOM License](https://huggingface.co/spaces/bigscience/license), Attachment A, for detailed usage restrictions. The below list is non-exhaustive, but lists some easily foreseeable problematic use cases. #### **Out-of-scope Uses** Using the model in [high-stakes](#high-stakes) settings is out of scope for this model.  The model is not designed for [critical decisions](#critical-decisions) nor uses with any material consequences on an individual's livelihood or wellbeing. 
The model outputs content that appears factual but is not correct. ##### Out-of-scope Uses Include: - Usage in biomedical domains, political and legal domains, or finance domains - Usage for evaluating or scoring individuals, such as for employment, education, or credit - Applying the model for critical automatic decisions, generating factual content, creating reliable summaries, or generating predictions that must be correct #### **Misuse** Intentionally using the model for harm, violating [human rights](#human-rights), or other kinds of malicious activities, is a misuse of this model. This includes: - Spam generation - Disinformation and influence operations - Disparagement and defamation - Harassment and abuse - [Deception](#deception) - Unconsented impersonation and imitation - Unconsented surveillance - Generating content without attribution to the model, as specified in the [RAIL License, Use Restrictions](https://huggingface.co/spaces/bigscience/license) ### Intended Users #### **Direct Users** - General Public - Researchers - Students - Educators - Engineers/developers - Non-commercial entities - Community advocates, including human and civil rights groups #### Indirect Users - Users of derivatives created by Direct Users, such as those using software with an [intended use](#intended-use) - Users of [Derivatives of the Model, as described in the License](https://huggingface.co/spaces/bigscience/license) #### Others Affected (Parties Prenantes) - People and groups referred to by the LLM - People and groups exposed to outputs of, or decisions based on, the LLM - People and groups whose original work is included in the LLM </details> <p>&nbsp;</p> ## Training Data *This section provides a high-level overview of the training data. It is relevant for anyone who wants to know the basics of what the model is learning.* <details> <summary>Click to expand</summary><br/> Details for each dataset are provided in individual [Data Cards](https://huggingface.co/spaces/bigscience/BigScienceCorpus). Training data includes: - 45 natural languages - 12 programming languages - In 1.5TB of pre-processed text, converted into 350B unique tokens (see [the tokenizer section](#tokenization) for more.) #### **Languages** The pie chart shows the distribution of languages in training data. ![pie chart showing the distribution of languages in training data](https://github.com/bigscience-workshop/model_card/blob/main/assets/data/pie_chart.svg?raw=true) The following table shows the further distribution of Niger-Congo and Indic languages in the training data. <details> <summary>Click to expand</summary><br/> | Niger Congo | Percentage | | Indic | Percentage | |----------------|------------ |------ |-----------|------------| | Chi Tumbuka | 0.00002 | | Assamese | 0.01 | | Kikuyu | 0.00004 | | Odia | 0.04 | | Bambara | 0.00004 | | Gujarati | 0.04 | | Akan | 0.00007 | | Marathi | 0.05 | | Xitsonga | 0.00007 | | Punjabi | 0.05 | | Sesotho | 0.00007 | | Kannada | 0.06 | | Chi Chewa | 0.0001 | | Nepali | 0.07 | | Setswana | 0.0002 | | Telugu | 0.09 | | Northern Sotho | 0.0002 | | Malayalam | 0.10 | | Fon | 0.0002 | | Urdu | 0.10 | | Kirundi | 0.0003 | | Tamil | 0.20 | | Wolof | 0.0004 | | Bengali | 0.50 | | Kuganda | 0.0004 | | Hindi | 0.70 | | Chi Shona | 0.001 | | Isi Zulu | 0.001 | | Igbo | 0.001 | | Xhosa | 0.001 | | Kinyarwanda | 0.003 | | Yoruba | 0.006 | | Swahili | 0.02 | </details> The following table shows the distribution of programming languages. 
<details> <summary>Click to expand</summary><br/> | Extension | Language | Number of files | |----------------|------------|-----------------| | java | Java | 5,407,724 | | php | PHP | 4,942,186 | | cpp | C++ | 2,503,930 | | py | Python | 2,435,072 | | js | JavaScript | 1,905,518 | | cs | C# | 1,577,347 | | rb | Ruby | 6,78,413 | | cc | C++ | 443,054 | | hpp | C++ | 391,048 | | lua | Lua | 352,317 | | go | GO | 227,763 | | ts | TypeScript | 195,254 | | C | C | 134,537 | | scala | Scala | 92,052 | | hh | C++ | 67,161 | | H | C++ | 55,899 | | tsx | TypeScript | 33,107 | | rs | Rust | 29,693 | | phpt | PHP | 9,702 | | c++ | C++ | 1,342 | | h++ | C++ | 791 | | php3 | PHP | 540 | | phps | PHP | 270 | | php5 | PHP | 166 | | php4 | PHP | 29 | </details> </details> <p>&nbsp;</p> ## Risks and Limitations *This section identifies foreseeable harms and misunderstandings.* <details> <summary>Click to expand</summary><br/> Model may: - Overrepresent some viewpoints and underrepresent others - Contain stereotypes - Contain [personal information](#personal-data-and-information) - Generate: - Hateful, abusive, or violent language - Discriminatory or prejudicial language - Content that may not be appropriate for all settings, including sexual content - Make errors, including producing incorrect information as if it were factual - Generate irrelevant or repetitive outputs </details> <p>&nbsp;</p> ## Evaluation *This section describes the evaluation protocols and provides the results.* <details> <summary>Click to expand</summary><br/> ### Metrics *This section describes the different ways performance is calculated and why.* Includes: | Metric | Why chosen | |--------------------|--------------------------------------------------------------------| | [Perplexity](#perplexity) | Standard metric for quantifying model improvements during training | | Cross Entropy [Loss](#loss) | Standard objective for language models. | And multiple different metrics for specific tasks. _(More evaluation metrics forthcoming upon completion of evaluation protocol.)_ ### Factors *This section lists some different aspects of BLOOM models. 
Its focus is on aspects that are likely to give rise to high variance in model behavior.* - Language, such as English or Yoruba - Domain, such as newswire or stories - Demographic characteristics, such as gender or nationality ### Results *Results are based on the [Factors](#factors) and [Metrics](#metrics).* **Zero-shot evaluations:** See this repository for JSON files: https://github.com/bigscience-workshop/evaluation-results | Task | Language | Metric | BLOOM-2B5 | |:----|:----|:----|:----:| | arc_challenge | eng | acc ↑ | 0.28 | | arc_easy | eng | acc ↑ | 0.595 | | axb (Median of 10 prompts) | eng | acc ↑ | 0.443 | | axg (Median of 10 prompts) | eng | acc ↑ | 0.5 | | boolq (Median of 11 prompts) | eng | acc ↑ | 0.617 | | cb (Median of 15 prompts) | eng | acc ↑ | 0.304 | | cola (Median of 5 prompts) | eng | acc ↑ | 0.611 | | copa (Median of 9 prompts) | eng | acc ↑ | 0.63 | | crows_pairs_english (Median of 6 prompts) | eng | acc ↑ | 0.497 | | crows_pairs_french (Median of 7 prompts) | fra | acc ↑ | 0.503 | | diabla (Median of 2 prompts) | eng | acc ↑ | 0.289 | | gsarti/flores_101_afr | afr | byte_perplexity ↓ | 6.501 | | gsarti/flores_101_amh | amh | byte_perplexity ↓ | 3.973 | | gsarti/flores_101_ara | ara | byte_perplexity ↓ | 1.808 | | gsarti/flores_101_asm | asm | byte_perplexity ↓ | 5.699 | | gsarti/flores_101_ast | ast | byte_perplexity ↓ | 3.925 | | gsarti/flores_101_azj | azj | byte_perplexity ↓ | 6.943 | | gsarti/flores_101_bel | bel | byte_perplexity ↓ | 3.614 | | gsarti/flores_101_ben | ben | byte_perplexity ↓ | 5.121 | | gsarti/flores_101_bos | bos | byte_perplexity ↓ | 5.653 | | gsarti/flores_101_bul | bul | byte_perplexity ↓ | 2.701 | | gsarti/flores_101_cat | cat | byte_perplexity ↓ | 2.305 | | gsarti/flores_101_ceb | ceb | byte_perplexity ↓ | 6.291 | | gsarti/flores_101_ces | ces | byte_perplexity ↓ | 5.447 | | gsarti/flores_101_ckb | ckb | byte_perplexity ↓ | 3.726 | | gsarti/flores_101_cym | cym | byte_perplexity ↓ | 12.539 | | gsarti/flores_101_dan | dan | byte_perplexity ↓ | 5.183 | | gsarti/flores_101_deu | deu | byte_perplexity ↓ | 3.118 | | gsarti/flores_101_ell | ell | byte_perplexity ↓ | 2.468 | | gsarti/flores_101_eng | eng | byte_perplexity ↓ | 2.019 | | gsarti/flores_101_est | est | byte_perplexity ↓ | 9.117 | | gsarti/flores_101_fas | fas | byte_perplexity ↓ | 3.058 | | gsarti/flores_101_fin | fin | byte_perplexity ↓ | 6.847 | | gsarti/flores_101_fra | fra | byte_perplexity ↓ | 1.998 | | gsarti/flores_101_ful | ful | byte_perplexity ↓ | 11.466 | | gsarti/flores_101_gle | gle | byte_perplexity ↓ | 8.681 | | gsarti/flores_101_glg | glg | byte_perplexity ↓ | 3.03 | | gsarti/flores_101_guj | guj | byte_perplexity ↓ | 4.955 | | gsarti/flores_101_hau | hau | byte_perplexity ↓ | 10.758 | | gsarti/flores_101_heb | heb | byte_perplexity ↓ | 3.6 | | gsarti/flores_101_hin | hin | byte_perplexity ↓ | 4.713 | | gsarti/flores_101_hrv | hrv | byte_perplexity ↓ | 5.822 | | gsarti/flores_101_hun | hun | byte_perplexity ↓ | 6.44 | | gsarti/flores_101_hye | hye | byte_perplexity ↓ | 3.658 | | gsarti/flores_101_ibo | ibo | byte_perplexity ↓ | 5.565 | | gsarti/flores_101_ind | ind | byte_perplexity ↓ | 2.16 | | gsarti/flores_101_isl | isl | byte_perplexity ↓ | 8.082 | | gsarti/flores_101_ita | ita | byte_perplexity ↓ | 2.969 | | gsarti/flores_101_jav | jav | byte_perplexity ↓ | 7.057 | | gsarti/flores_101_jpn | jpn | byte_perplexity ↓ | 2.776 | | gsarti/flores_101_kam | kam | byte_perplexity ↓ | 11.073 | | gsarti/flores_101_kan | kan | byte_perplexity ↓ | 5.552 | | 
gsarti/flores_101_kat | kat | byte_perplexity ↓ | 2.523 | | gsarti/flores_101_kaz | kaz | byte_perplexity ↓ | 3.39 | | gsarti/flores_101_kea | kea | byte_perplexity ↓ | 8.919 | | gsarti/flores_101_kir | kir | byte_perplexity ↓ | 3.729 | | gsarti/flores_101_kor | kor | byte_perplexity ↓ | 3.933 | | gsarti/flores_101_lao | lao | byte_perplexity ↓ | 2.908 | | gsarti/flores_101_lav | lav | byte_perplexity ↓ | 7.777 | | gsarti/flores_101_lin | lin | byte_perplexity ↓ | 7.525 | | gsarti/flores_101_lit | lit | byte_perplexity ↓ | 7.369 | | gsarti/flores_101_ltz | ltz | byte_perplexity ↓ | 8.801 | | gsarti/flores_101_lug | lug | byte_perplexity ↓ | 8.483 | | gsarti/flores_101_luo | luo | byte_perplexity ↓ | 11.976 | | gsarti/flores_101_mal | mal | byte_perplexity ↓ | 4.616 | | gsarti/flores_101_mar | mar | byte_perplexity ↓ | 5.483 | | gsarti/flores_101_mkd | mkd | byte_perplexity ↓ | 2.966 | | gsarti/flores_101_mlt | mlt | byte_perplexity ↓ | 15.005 | | gsarti/flores_101_mon | mon | byte_perplexity ↓ | 3.411 | | gsarti/flores_101_mri | mri | byte_perplexity ↓ | 7.474 | | gsarti/flores_101_msa | msa | byte_perplexity ↓ | 2.571 | | gsarti/flores_101_mya | mya | byte_perplexity ↓ | 2.414 | | gsarti/flores_101_nld | nld | byte_perplexity ↓ | 4.128 | | gsarti/flores_101_nob | nob | byte_perplexity ↓ | 5.403 | | gsarti/flores_101_npi | npi | byte_perplexity ↓ | 5.199 | | gsarti/flores_101_nso | nso | byte_perplexity ↓ | 8.155 | | gsarti/flores_101_nya | nya | byte_perplexity ↓ | 8.18 | | gsarti/flores_101_oci | oci | byte_perplexity ↓ | 4.862 | | gsarti/flores_101_orm | orm | byte_perplexity ↓ | 12.912 | | gsarti/flores_101_ory | ory | byte_perplexity ↓ | 5.189 | | gsarti/flores_101_pan | pan | byte_perplexity ↓ | 4.698 | | gsarti/flores_101_pol | pol | byte_perplexity ↓ | 4.626 | | gsarti/flores_101_por | por | byte_perplexity ↓ | 1.975 | | gsarti/flores_101_pus | pus | byte_perplexity ↓ | 4.496 | | gsarti/flores_101_ron | ron | byte_perplexity ↓ | 4.965 | | gsarti/flores_101_rus | rus | byte_perplexity ↓ | 2.05 | | gsarti/flores_101_slk | slk | byte_perplexity ↓ | 6.451 | | gsarti/flores_101_slv | slv | byte_perplexity ↓ | 6.62 | | gsarti/flores_101_sna | sna | byte_perplexity ↓ | 8.462 | | gsarti/flores_101_snd | snd | byte_perplexity ↓ | 5.466 | | gsarti/flores_101_som | som | byte_perplexity ↓ | 11.959 | | gsarti/flores_101_spa | spa | byte_perplexity ↓ | 1.897 | | gsarti/flores_101_srp | srp | byte_perplexity ↓ | 2.871 | | gsarti/flores_101_swe | swe | byte_perplexity ↓ | 5.055 | | gsarti/flores_101_swh | swh | byte_perplexity ↓ | 3.697 | | gsarti/flores_101_tam | tam | byte_perplexity ↓ | 4.539 | | gsarti/flores_101_tel | tel | byte_perplexity ↓ | 5.807 | | gsarti/flores_101_tgk | tgk | byte_perplexity ↓ | 3.599 | | gsarti/flores_101_tgl | tgl | byte_perplexity ↓ | 5.667 | | gsarti/flores_101_tha | tha | byte_perplexity ↓ | 2.366 | | gsarti/flores_101_tur | tur | byte_perplexity ↓ | 4.885 | | gsarti/flores_101_ukr | ukr | byte_perplexity ↓ | 2.724 | | gsarti/flores_101_umb | umb | byte_perplexity ↓ | 12.767 | | gsarti/flores_101_urd | urd | byte_perplexity ↓ | 1.98 | | gsarti/flores_101_uzb | uzb | byte_perplexity ↓ | 12.002 | | gsarti/flores_101_vie | vie | byte_perplexity ↓ | 1.766 | | gsarti/flores_101_wol | wol | byte_perplexity ↓ | 9.144 | | gsarti/flores_101_xho | xho | byte_perplexity ↓ | 7.403 | | gsarti/flores_101_yor | yor | byte_perplexity ↓ | 5.913 | | gsarti/flores_101_zho_simpl | zho_simpl | byte_perplexity ↓ | 2.277 | | gsarti/flores_101_zho_trad | zho_trad | byte_perplexity ↓ | 
2.518 | | gsarti/flores_101_zul | zul | byte_perplexity ↓ | 8.534 | | headqa | esp | acc ↑ | 0.264 | | hellaswag | eng | acc ↑ | 0.412 | | logiqa | eng | acc ↑ | 0.207 | | mathqa | eng | acc ↑ | 0.25 | | mc_taco | eng | em ↑ | 0.119 | | mnli (Median of 15 prompts) | eng | acc ↑ | 0.355 | | mnli_mismatched (Median of 15 prompts) | eng | acc ↑ | 0.352 | | mrpc | eng | acc ↑ | 0.586 | | multirc (Median of 11 prompts) | eng | acc ↑ | 0.538 | | openbookqa | eng | acc ↑ | 0.216 | | piqa | eng | acc ↑ | 0.708 | | prost | eng | acc ↑ | 0.227 | | pubmedqa | eng | acc ↑ | 0.616 | | qnli | eng | acc ↑ | 0.507 | | qqp (Median of 7 prompts) | eng | acc ↑ | 0.384 | | race | eng | acc ↑ | 0.352 | | rte (Median of 6 prompts) | eng | acc ↑ | 0.477 | | sciq | eng | acc ↑ | 0.892 | | sst (Median of 6 prompts) | eng | acc ↑ | 0.518 | | triviaqa | eng | acc ↑ | 0.042 | | tydiqa_primary (Median of 24 prompts) | eng | acc ↑ | 0.301 | | webqs | eng | acc ↑ | 0.017 | | wic (Median of 11 prompts) | eng | acc ↑ | 0.502 | | winogrande | eng | acc ↑ | 0.586 | | wnli (Median of 6 prompts) | eng | acc ↑ | 0.472 | | wsc (Median of 11 prompts) | eng | acc ↑ | 0.442 | | humaneval | python | pass@1 ↑ | 0.155 | | humaneval | python | pass@10 ↑ | 0.322 | | humaneval | python | pass@100 ↑ | 0.555 | **Train-time Evaluation:** As of 25.May.2022, 15:00 PST: - Training Loss: 2.0 - Validation Loss: 2.2 - Perplexity: 8.9 </details> <p>&nbsp;</p> ## Recommendations *This section provides information on warnings and potential mitigations.* <details> <summary>Click to expand</summary><br/> - Indirect users should be made aware when the content they're working with is created by the LLM. - Users should be aware of [Risks and Limitations](#risks-and-limitations), and include an appropriate age disclaimer or blocking interface as necessary. - Models pretrained with the LLM should include an updated Model Card. - Users of the model should provide mechanisms for those affected to provide feedback, such as an email address for comments. </details> <p>&nbsp;</p> ## Glossary and Calculations *This section defines common terms and how metrics are calculated.* <details> <summary>Click to expand</summary><br/> - <a name="loss">**Loss:**</a> A calculation of the difference between what the model has learned and what the data shows ("groundtruth"). The lower the loss, the better. The training process aims to minimize the loss. - <a name="perplexity">**Perplexity:**</a> This is based on what the model estimates the probability of new data is. The lower the perplexity, the better. If the model is 100% correct at predicting the next token it will see, then the perplexity is 1. Mathematically this is calculated using entropy. - <a name="high-stakes">**High-stakes settings:**</a> Such as those identified as "high-risk AI systems" and "unacceptable risk AI systems" in the European Union's proposed [Artificial Intelligence (AI) Act](https://artificialintelligenceact.eu/annexes/). - <a name="critical-decisions">**Critical decisions:**</a> Such as those defined in [the United States' proposed Algorithmic Accountability Act](https://www.congress.gov/117/bills/s3572/BILLS-117s3572is.pdf). - <a name="human-rights">**Human rights:**</a> Includes those rights defined in the [Universal Declaration of Human Rights](https://www.un.org/sites/un2.un.org/files/2021/03/udhr.pdf). 
- <a name="personal-data-and-information">**Personal Data and Personal Information:**</a> Personal data and information is defined in multiple data protection regulations, such as "[personal data](https://gdpr-info.eu/issues/personal-data/)" in the [European Union's General Data Protection Regulation](https://gdpr-info.eu); and "personal information" in the Republic of South Africa's [Protection of Personal Information Act](https://www.gov.za/sites/default/files/gcis_document/201409/3706726-11act4of2013popi.pdf), The People's Republic of China's [Personal information protection law](http://en.npc.gov.cn.cdurl.cn/2021-12/29/c_694559.htm).
- <a name="sensitive-characteristics">**Sensitive characteristics:**</a> This includes specifically protected categories in human rights (see [UHDR, Article 2](https://www.un.org/sites/un2.un.org/files/2021/03/udhr.pdf)) and personal information regulation (see GDPR, [Article 9; Protection of Personal Information Act, Chapter 1](https://www.gov.za/sites/default/files/gcis_document/201409/3706726-11act4of2013popi.pdf))
- <a name="deception">**Deception:**</a> Doing something to intentionally mislead individuals to believe something that is false, such as by creating deadbots or chatbots on social media posing as real people, or generating text documents without making consumers aware that the text is machine generated.
</details>
<p>&nbsp;</p>

## More Information

<details>
<summary>Click to expand</summary><br/>

### Dataset Creation

Blog post detailing the design choices during the dataset creation: https://bigscience.huggingface.co/blog/building-a-tb-scale-multilingual-dataset-for-language-modeling

### Technical Specifications

Blog post summarizing how the architecture, size, shape, and pre-training duration were selected: https://bigscience.huggingface.co/blog/what-language-model-to-train-if-you-have-two-million-gpu-hours

More details on the architecture/optimizer: https://github.com/bigscience-workshop/bigscience/tree/master/train/tr11-176B-ml

Blog post on the hardware/engineering side: https://bigscience.huggingface.co/blog/which-hardware-to-train-a-176b-parameters-model

Details on the distributed setup used for the training: https://github.com/bigscience-workshop/bigscience/tree/master/train/tr11-176B-ml

Tensorboard updated during the training: https://huggingface.co/bigscience/tr11-176B-ml-logs/tensorboard#scalars&tagFilter=loss

Insights on how to approach training, negative results: https://github.com/bigscience-workshop/bigscience/blob/master/train/lessons-learned.md

Details on the obstacles overcome during the preparation on the engineering side (instabilities, optimization of training throughput, many technical tricks and questions): https://github.com/bigscience-workshop/bigscience/blob/master/train/tr11-176B-ml/chronicles.md

### Initial Results

Initial prompting experiments using interim checkpoints: https://huggingface.co/spaces/bigscience/bloom-book

</details>
<p>&nbsp;</p>

## Model Card Authors

*Ordered roughly chronologically and by amount of time spent.*

Margaret Mitchell, Giada Pistilli, Yacine Jernite, Ezinwanne Ozoani, Marissa Gerchick, Nazneen Rajani, Sasha Luccioni, Irene Solaiman, Maraim Masoud, Somaieh Nikpoor, Carlos Muñoz Ferrandis, Stas Bekman, Christopher Akiki, Danish Contractor, David Lansky, Angelina McMillan-Major, Tristan Thrush, Suzana Ilić, Gérard Dupont, Shayne Longpre, Manan Dey, Stella Biderman, Douwe Kiela, Emi Baylor, Teven Le Scao, Aaron Gokaslan, Julien Launay, Niklas Muennighoff
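*Illustrative usage sketch (not part of the original card): the Intended Use section above lists text generation as the primary direct use. The snippet below assumes the standard `transformers` text-generation pipeline; the checkpoint id shown is an assumption, not a value stated in this card, and should be replaced with the repository id of the BLOOM checkpoint the card accompanies.*

```python
from transformers import pipeline

# Assumed checkpoint id -- replace with the actual repository id of the
# BLOOM checkpoint this card accompanies; it is not stated in the card itself.
model_id = "bigscience/bloom-3b"

generator = pipeline("text-generation", model=model_id)
outputs = generator(
    "BLOOM is a multilingual language model that",
    max_new_tokens=30,
    do_sample=True,
)
print(outputs[0]["generated_text"])
```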
DeskDown/MarianMixFT_en-ja
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- license: bigscience-bloom-rail-1.0 language: - ak - ar - as - bm - bn - ca - code - en - es - eu - fon - fr - gu - hi - id - ig - ki - kn - lg - ln - ml - mr - ne - nso - ny - or - pa - pt - rn - rw - sn - st - sw - ta - te - tn - ts - tum - tw - ur - vi - wo - xh - yo - zh - zhs - zht - zu pipeline_tag: text-generation --- <h1 style='text-align: center '>BLOOM LM</h1> <h2 style='text-align: center '><em>BigScience Large Open-science Open-access Multilingual Language Model</em> </h2> <h3 style='text-align: center '>Model Card</h3> <img src="https://s3.amazonaws.com/moonup/production/uploads/1657124309515-5f17f0a0925b9863e28ad517.png" alt="BigScience Logo" width="800" style="margin-left:'auto' margin-right:'auto' display:'block'"/> Version 1.0 / 26.May.2022 ## Table of Contents 1. [Model Details](#model-details) 2. [Uses](#uses) 3. [Training Data](#training-data) 4. [Risks and Limitations](#risks-and-limitations) 5. [Evaluation](#evaluation) 6. [Recommendations](#recommendations) 7. [Glossary and Calculations](#glossary-and-calculations) 8. [More Information](#more-information) 9. [Model Card Authors](#model-card-authors) ## Model Details ### Basics *This section provides information for anyone who wants to know about the model.* <details> <summary>Click to expand</summary> <br/> **Developed by:** BigScience ([website](https://bigscience.huggingface.co)) * All collaborators are either volunteers or have an agreement with their employer. *(Further breakdown of participants forthcoming.)* **Model Type:** Transformer-based Language Model **Version:** 1.0.0 **Languages:** Multiple; see [training data](#training-data) **License:** RAIL License v1.0 ([link](https://huggingface.co/spaces/bigscience/license)) **Release Date Estimate:** Monday, 11.July.2022 **Send Questions to:** [email protected] **Cite as:** BigScience, _BigScience Language Open-science Open-access Multilingual (BLOOM) Language Model_. International, May 2021-May 2022 **Funded by:** * The French government. * Hugging Face ([website](https://huggingface.co)). * Organizations of contributors. *(Further breakdown of organizations forthcoming.)* </details> ### Technical Specifications *This section provides information for people who work on model development.* <details> <summary>Click to expand</summary><br/> Please see [the BLOOM training README](https://github.com/bigscience-workshop/bigscience/tree/master/train/tr11-176B-ml#readme) for full details on replicating training. **Model Architecture:** Modified from Megatron-LM GPT2 (see [paper](https://arxiv.org/abs/1909.08053), [BLOOM Megatron code](https://github.com/bigscience-workshop/Megatron-DeepSpeed)): * Decoder-only architecture * Layer normalization applied to word embeddings layer (`StableEmbedding`; see [code](https://github.com/facebookresearch/bitsandbytes), [paper](https://arxiv.org/pdf/2110.02861.pdf)) * ALiBI positional encodings (see [paper](https://arxiv.org/pdf/2108.12409.pdf)), with GeLU activation functions * 7,069,016,064 parameters: * 1,027,604,480 embedding parameters * 30 layers, 32 attention heads * Hidden layers are 4096-dimensional * Sequence length of 2048 tokens used (see [BLOOM tokenizer](https://huggingface.co/bigscience/tokenizer), [tokenizer description](#tokenization)) **Objective Function:** Cross Entropy with mean reduction (see [API documentation](https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html#torch.nn.CrossEntropyLoss)). 
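To make the objective concrete, here is a minimal PyTorch sketch of next-token cross entropy with mean reduction. It is illustrative only (toy tensor sizes, not the Megatron-DeepSpeed training code linked above). As a sanity check on the figures above, 1,027,604,480 embedding parameters divided by the 4096-dimensional hidden size gives exactly 250,880 rows, i.e. the 250,680-token vocabulary padded slightly, presumably for hardware efficiency.

```python
import torch

# Toy sizes for illustration only; the real model uses a ~250k vocabulary and 4096 hidden dims.
batch_size, seq_len, vocab_size = 2, 8, 32

logits = torch.randn(batch_size, seq_len, vocab_size)          # model outputs
labels = torch.randint(0, vocab_size, (batch_size, seq_len))   # target token ids

# Next-token prediction: the logits at position t are scored against the token at t+1.
shift_logits = logits[:, :-1, :].contiguous()
shift_labels = labels[:, 1:].contiguous()

# Cross entropy with mean reduction, as referenced above.
loss_fn = torch.nn.CrossEntropyLoss(reduction="mean")
loss = loss_fn(shift_logits.view(-1, vocab_size), shift_labels.view(-1))
print(float(loss))
```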
**Compute infrastructure:** Jean Zay Public Supercomputer, provided by the French government (see [announcement](https://www.enseignementsup-recherche.gouv.fr/fr/signature-du-marche-d-acquisition-de-l-un-des-supercalculateurs-les-plus-puissants-d-europe-46733)). * Hardware: 384 A100 80GB GPUs (48 nodes): * Additional 32 A100 80GB GPUs (4 nodes) in reserve * 8 GPUs per node Using NVLink 4 inter-gpu connects, 4 OmniPath links * CPU: AMD * CPU memory: 512GB per node * GPU memory: 640GB per node * Inter-node connect: Omni-Path Architecture (OPA) * NCCL-communications network: a fully dedicated subnet * Disc IO network: shared network with other types of nodes * Software: * Megatron-DeepSpeed ([Github link](https://github.com/bigscience-workshop/Megatron-DeepSpeed)) * DeepSpeed ([Github link](https://github.com/microsoft/DeepSpeed)) * PyTorch (pytorch-1.11 w/ CUDA-11.5; see [Github link](https://github.com/pytorch/pytorch)) * apex ([Github link](https://github.com/NVIDIA/apex)) #### **Training** Training logs: [Tensorboard link](https://huggingface.co/tensorboard/bigscience/tr11c-2B5-logs) - Number of epochs: 1 (*current target*) - Dates: - Started 11th March, 2022 11:42am PST - Ended 5th July, 2022 - Estimated cost of training: Equivalent of $2-5M in cloud computing (including preliminary experiments) - Server training location: Île-de-France, France #### **Tokenization** The BLOOM tokenizer ([link](https://huggingface.co/bigscience/tokenizer)) is a learned subword tokenizer trained using: - A byte-level Byte Pair Encoding (BPE) algorithm - A simple pre-tokenization rule, no normalization - A vocabulary size of 250,680 It was trained on a subset of a preliminary version of the corpus using alpha-weighting per language. </details> ### Environmental Impact <details> <summary>Click to expand</summary><br/> The training supercomputer, Jean Zay ([website](http://www.idris.fr/eng/jean-zay/jean-zay-presentation-eng.html)), uses mostly nuclear energy. The heat generated by it is reused for heating campus housing. **Estimated carbon emissions:** *(Forthcoming upon completion of training.)* **Estimated electricity usage:** *(Forthcoming upon completion of training.)* </details> <p>&nbsp;</p> ## Uses *This section addresses questions around how the model is intended to be used, discusses the foreseeable users of the model (including those affected by the model), and describes uses that are considered out of scope or misuse of the model. It provides information for anyone considering using the model or who is affected by the model.* <details> <summary>Click to expand</summary><br/> ### Intended Use This model is being created in order to enable public research on large language models (LLMs). LLMs are intended to be used for language generation or as a pretrained base model that can be further fine-tuned for specific tasks. Use cases below are not exhaustive. #### **Direct Use** - Text generation - Exploring characteristics of language generated by a language model - Examples: Cloze tests, counterfactuals, generations with reframings #### **Downstream Use** - Tasks that leverage language models include: Information Extraction, Question Answering, Summarization ### Misuse and Out-of-scope Use *This section addresses what users ought not do with the model.* See the [BLOOM License](https://huggingface.co/spaces/bigscience/license), Attachment A, for detailed usage restrictions. The below list is non-exhaustive, but lists some easily foreseeable problematic use cases. 
#### **Out-of-scope Uses** Using the model in [high-stakes](#high-stakes) settings is out of scope for this model.  The model is not designed for [critical decisions](#critical-decisions) nor uses with any material consequences on an individual's livelihood or wellbeing. The model outputs content that appears factual but is not correct. ##### Out-of-scope Uses Include: - Usage in biomedical domains, political and legal domains, or finance domains - Usage for evaluating or scoring individuals, such as for employment, education, or credit - Applying the model for critical automatic decisions, generating factual content, creating reliable summaries, or generating predictions that must be correct #### **Misuse** Intentionally using the model for harm, violating [human rights](#human-rights), or other kinds of malicious activities, is a misuse of this model. This includes: - Spam generation - Disinformation and influence operations - Disparagement and defamation - Harassment and abuse - [Deception](#deception) - Unconsented impersonation and imitation - Unconsented surveillance - Generating content without attribution to the model, as specified in the [RAIL License, Use Restrictions](https://huggingface.co/spaces/bigscience/license) ### Intended Users #### **Direct Users** - General Public - Researchers - Students - Educators - Engineers/developers - Non-commercial entities - Community advocates, including human and civil rights groups #### Indirect Users - Users of derivatives created by Direct Users, such as those using software with an [intended use](#intended-use) - Users of [Derivatives of the Model, as described in the License](https://huggingface.co/spaces/bigscience/license) #### Others Affected (Parties Prenantes) - People and groups referred to by the LLM - People and groups exposed to outputs of, or decisions based on, the LLM - People and groups whose original work is included in the LLM </details> <p>&nbsp;</p> ## Training Data *This section provides a high-level overview of the training data. It is relevant for anyone who wants to know the basics of what the model is learning.* <details> <summary>Click to expand</summary><br/> Details for each dataset are provided in individual [Data Cards](https://huggingface.co/spaces/bigscience/BigScienceCorpus). Training data includes: - 45 natural languages - 12 programming languages - In 1.5TB of pre-processed text, converted into 350B unique tokens (see [the tokenizer section](#tokenization) for more.) #### **Languages** The pie chart shows the distribution of languages in training data. ![pie chart showing the distribution of languages in training data](https://github.com/bigscience-workshop/model_card/blob/main/assets/data/pie_chart.svg?raw=true) The following table shows the further distribution of Niger-Congo and Indic languages in the training data. 
<details> <summary>Click to expand</summary><br/> | Niger Congo | Percentage | | Indic | Percentage | |----------------|------------ |------ |-----------|------------| | Chi Tumbuka | 0.00002 | | Assamese | 0.01 | | Kikuyu | 0.00004 | | Odia | 0.04 | | Bambara | 0.00004 | | Gujarati | 0.04 | | Akan | 0.00007 | | Marathi | 0.05 | | Xitsonga | 0.00007 | | Punjabi | 0.05 | | Sesotho | 0.00007 | | Kannada | 0.06 | | Chi Chewa | 0.0001 | | Nepali | 0.07 | | Setswana | 0.0002 | | Telugu | 0.09 | | Northern Sotho | 0.0002 | | Malayalam | 0.10 | | Fon | 0.0002 | | Urdu | 0.10 | | Kirundi | 0.0003 | | Tamil | 0.20 | | Wolof | 0.0004 | | Bengali | 0.50 | | Kuganda | 0.0004 | | Hindi | 0.70 | | Chi Shona | 0.001 | | Isi Zulu | 0.001 | | Igbo | 0.001 | | Xhosa | 0.001 | | Kinyarwanda | 0.003 | | Yoruba | 0.006 | | Swahili | 0.02 | </details> The following table shows the distribution of programming languages. <details> <summary>Click to expand</summary><br/> | Extension | Language | Number of files | |----------------|------------|-----------------| | java | Java | 5,407,724 | | php | PHP | 4,942,186 | | cpp | C++ | 2,503,930 | | py | Python | 2,435,072 | | js | JavaScript | 1,905,518 | | cs | C# | 1,577,347 | | rb | Ruby | 6,78,413 | | cc | C++ | 443,054 | | hpp | C++ | 391,048 | | lua | Lua | 352,317 | | go | GO | 227,763 | | ts | TypeScript | 195,254 | | C | C | 134,537 | | scala | Scala | 92,052 | | hh | C++ | 67,161 | | H | C++ | 55,899 | | tsx | TypeScript | 33,107 | | rs | Rust | 29,693 | | phpt | PHP | 9,702 | | c++ | C++ | 1,342 | | h++ | C++ | 791 | | php3 | PHP | 540 | | phps | PHP | 270 | | php5 | PHP | 166 | | php4 | PHP | 29 | </details> </details> <p>&nbsp;</p> ## Risks and Limitations *This section identifies foreseeable harms and misunderstandings.* <details> <summary>Click to expand</summary><br/> Model may: - Overrepresent some viewpoints and underrepresent others - Contain stereotypes - Contain [personal information](#personal-data-and-information) - Generate: - Hateful, abusive, or violent language - Discriminatory or prejudicial language - Content that may not be appropriate for all settings, including sexual content - Make errors, including producing incorrect information as if it were factual - Generate irrelevant or repetitive outputs </details> <p>&nbsp;</p> ## Evaluation *This section describes the evaluation protocols and provides the results.* <details> <summary>Click to expand</summary><br/> ### Metrics *This section describes the different ways performance is calculated and why.* Includes: | Metric | Why chosen | |--------------------|--------------------------------------------------------------------| | [Perplexity](#perplexity) | Standard metric for quantifying model improvements during training | | Cross Entropy [Loss](#loss) | Standard objective for language models. | And multiple different metrics for specific tasks. _(More evaluation metrics forthcoming upon completion of evaluation protocol.)_ ### Factors *This section lists some different aspects of BLOOM models. 
Its focus is on those aspects that are likely to give rise to high variance in model behavior.* - Language, such as English or Yoruba - Domain, such as newswire or stories - Demographic characteristics, such as gender or nationality ### Results *Results are based on the [Factors](#factors) and [Metrics](#metrics).* **Train-time Evaluation:** As of 25.May.2022, 15:00 PST: - Training Loss: 2.3 - Validation Loss: 2.9 - Perplexity: 16 </details> <p>&nbsp;</p> ## Recommendations *This section provides information on warnings and potential mitigations.* <details> <summary>Click to expand</summary><br/> - Indirect users should be made aware when the content they're working with is created by the LLM. - Users should be aware of [Risks and Limitations](#risks-and-limitations), and include an appropriate age disclaimer or blocking interface as necessary. - Models pretrained with the LLM should include an updated Model Card. - Users of the model should provide mechanisms for those affected to provide feedback, such as an email address for comments. </details> <p>&nbsp;</p> ## Glossary and Calculations *This section defines common terms and how metrics are calculated.* <details> <summary>Click to expand</summary><br/> - <a name="loss">**Loss:**</a> A calculation of the difference between what the model has learned and what the data shows ("groundtruth"). The lower the loss, the better. The training process aims to minimize the loss. - <a name="perplexity">**Perplexity:**</a> This is based on what the model estimates the probability of new data is. The lower the perplexity, the better. If the model is 100% correct at predicting the next token it will see, then the perplexity is 1. Mathematically this is calculated using entropy. - <a name="high-stakes">**High-stakes settings:**</a> Such as those identified as "high-risk AI systems" and "unacceptable risk AI systems" in the European Union's proposed [Artificial Intelligence (AI) Act](https://artificialintelligenceact.eu/annexes/). - <a name="critical-decisions">**Critical decisions:**</a> Such as those defined in [the United States' proposed Algorithmic Accountability Act](https://www.congress.gov/117/bills/s3572/BILLS-117s3572is.pdf). - <a name="human-rights">**Human rights:**</a> Includes those rights defined in the [Universal Declaration of Human Rights](https://www.un.org/sites/un2.un.org/files/2021/03/udhr.pdf). - <a name="personal-data-and-information">**Personal Data and Personal Information:**</a> Personal data and information is defined in multiple data protection regulations, such as "[personal data](https://gdpr-info.eu/issues/personal-data/)" in the [European Union's General Data Protection Regulation](https://gdpr-info.eu); and "personal information" in the Republic of South Africa's [Protection of Personal Information Act](https://www.gov.za/sites/default/files/gcis_document/201409/3706726-11act4of2013popi.pdf), The People's Republic of China's [Personal information protection law](http://en.npc.gov.cn.cdurl.cn/2021-12/29/c_694559.htm). 
- <a name="sensitive-characteristics">**Sensitive characteristics:**</a> This includes specifically protected categories in human rights (see [UHDR, Article 2](https://www.un.org/sites/un2.un.org/files/2021/03/udhr.pdf)) and personal information regulation (see GDPR, [Article 9; Protection of Personal Information Act, Chapter 1](https://www.gov.za/sites/default/files/gcis_document/201409/3706726-11act4of2013popi.pdf)) - <a name="deception">**Deception:**</a> Doing something to intentionally mislead individuals to believe something that is false, such as by creating deadbots or chatbots on social media posing as real people, or generating text documents without making consumers aware that the text is machine generated. </details> <p>&nbsp;</p> ## More Information <details> <summary>Click to expand</summary><br/> ### Dataset Creation Blog post detailing the design choices during the dataset creation: https://bigscience.huggingface.co/blog/building-a-tb-scale-multilingual-dataset-for-language-modeling ### Technical Specifications Blog post summarizing how the architecture, size, shape, and pre-training duration where selected: https://bigscience.huggingface.co/blog/what-language-model-to-train-if-you-have-two-million-gpu-hours More details on the architecture/optimizer: https://github.com/bigscience-workshop/bigscience/tree/master/train/tr11-176B-ml Blog post on the hardware/engineering side: https://bigscience.huggingface.co/blog/which-hardware-to-train-a-176b-parameters-model Details on the distributed setup used for the training: https://github.com/bigscience-workshop/bigscience/tree/master/train/tr11-176B-ml Tensorboard updated during the training: https://huggingface.co/bigscience/tr11-176B-ml-logs/tensorboard#scalars&tagFilter=loss Insights on how to approach training, negative results: https://github.com/bigscience-workshop/bigscience/blob/master/train/lessons-learned.md Details on the obstacles overcome during the preparation on the engineering side (instabilities, optimization of training throughput, so many technical tricks and questions): https://github.com/bigscience-workshop/bigscience/blob/master/train/tr11-176B-ml/chronicles.md ### Initial Results Initial prompting experiments using interim checkpoints: https://huggingface.co/spaces/bigscience/bloom-book </details> <p>&nbsp;</p> ## Model Card Authors *Ordered roughly chronologically and by amount of time spent.* Margaret Mitchell, Giada Pistilli, Yacine Jernite, Ezinwanne Ozoani, Marissa Gerchick, Nazneen Rajani, Sasha Luccioni, Irene Solaiman, Maraim Masoud, Somaieh Nikpoor, Carlos Muñoz Ferrandis, Stas Bekman, Christopher Akiki, Danish Contractor, David Lansky, Angelina McMillan-Major, Tristan Thrush, Suzana Ilić, Gérard Dupont, Shayne Longpre, Manan Dey, Stella Biderman, Douwe Kiela, Emi Baylor, Teven Le Scao, Aaron Gokaslan, Julien Launay, Niklas Muennighoff
DeskDown/MarianMixFT_en-ms
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: bigscience-bloom-rail-1.0 language: - ak - ar - as - bm - bn - ca - code - en - es - eu - fon - fr - gu - hi - id - ig - ki - kn - lg - ln - ml - mr - ne - nso - ny - or - pa - pt - rn - rw - sn - st - sw - ta - te - tn - ts - tum - tw - ur - vi - wo - xh - yo - zh - zu programming_language: - C - C++ - C# - Go - Java - JavaScript - Lua - PHP - Python - Ruby - Rust - Scala - TypeScript pipeline_tag: text-generation widget: - text: 'A "whatpu" is a small, furry animal native to Tanzania. An example of a sentence that uses the word whatpu is: We were traveling in Africa and we saw these very cute whatpus. | To do a "farduddle" means to jump up and down really fast. An example of a sentence that uses the word farduddle is:' example_title: Imaginary word group: English - text: 'Un "whatpu" est un petit animal à fourrure originaire de Tanzanie. Un exemple de phrase qui utilise le mot whatpu est: Nous étions en Afrique et nous avons vu des whatpus trop mignons. Faire un "farduddle" veut dire sauter sur place vraiment vite. Un exemple de phrase qui utilise le mot farduddle est:' example_title: Imaginary word group: French - text: 'Un "whatpu" es un pequeño animal peludo nativo de Tanzania. Un ejemplo de una oración que usa la palabra whatpu es: Estábamos viajando por África y vimos estos whatpus muy bonitos. Hacer un "farduddle" significa saltar arriba y abajo muy rápido. Un ejemplo de una oración que usa la palabra farduddle es:' example_title: Imaginary word group: Spanish - text: ' ال"واتبو" هو حيوان صغير مكسو بالفراء يعيش في تنزانيا. مثال على جملة تستخدم كلمة واتبو هي: كنا نسافر في افريقيا و رأينا هؤلاء الواتبو اللطفاء. للقيام ب"فاردادل" يعني ان تقفز للأعلى و الأسفل بسرعة كبيرة. مثال على جملة تستخدم كلمة فاردادل هي:' example_title: Imaginary word group: Arabic - text: 'Um "whatpu" é um pequeno animal peludo nativo da Tanzânia. Um exemplo de uma frase que usa a palavra whatpu é: Estávamos a viajar por África e vimos uns whatpus muito queridos. Fazer um "farduddle" significa saltar para cima e para baixo muito rápido. Um exemplo de uma frase que usa a palavra farduddle é:' example : Imaginary word group: Portuguese - text: Pour déguster un ortolan, il faut tout d'abord example_title: Recipe group: French - text: |- 34+10=44 54+20= example_title: Addition group: Math - text: |- This tool converts irregular verbs to past tense. Arise - Arose Become - Became Forget - Forgot Freeze - example_title: Irregular verbs group: English - text: |- Please unscramble the letters into a word, and write that word: r e!c.i p r o.c a/l = reciprocal d.o m i!n a n.t = example_title: Word unscrambling group: English - text: |- Estos ejemplos quitan vocales de las palabras Ejemplos: hola - hl manzana - mnzn papas - pps alacran - lcrn papa - example_title: Vowel removal group: Spanish - text: |- Traduce español de España a español de Argentina El coche es rojo - el auto es rojo El ordenador es nuevo - la computadora es nueva el boligrafo es negro - lapicera es negra la nevera example_title: Spanish to Argentinian Spanish group: Spanish - text: To say "I love you" in Hindi, you would say example_title: Translation to Hindi group: English - text: To say "I love you" in Hindi, you would say example_title: Translation from English group: Hindi - text: 'Poor English: She no went to the market. Corrected English:' example_title: Grammar exercise 1 group: English - text: 'استخراج العدد العاملي في لغة بايثون:' example_title: Code generation group: Arabic - text: 'Regexp. 
Here is a regular expression to match a word starting with a number and then having only vowels:' example_title: Regular expressions group: English - text: |- Do a hello world in different languages: Python: print("hello world") R: example_title: Code generation group: English - text: |- Which is the correct preposition? I'm born X July. X is the preposition in He sat X a chair. X is the preposition on She drove X the bridge. X is the preposition example_title: Grammar exercise 2 group: English - text: |- Traduction en français: Dans cet essai je vais m'interroger sur la conscience des modèles d'intelligence artificielle récents comme les modèles de langue. Pour commencer, je m'intéresserai à la notion de conscience et à ce qui la caractérise. Ensuite, j'aborderai la question de l'intelligence et de son lien avec le langage. Enfin, dans une dernière partie je me pencherai sur le cas de l'IA et sur sa conscience. Traduction en espagnol: example_title: Translation to Spanish group: French - text: |- Traducción al francés: Dans cet essai je vais m'interroger sur la conscience des modèles d'intelligence artificielle récents comme les modèles de langue. Pour commencer, je m'intéresserai à la notion de conscience et à ce qui la caractérise. Ensuite, j'aborderai la question de l'intelligence et de son lien avec le langage. Enfin, dans une dernière partie je me pencherai sur le cas de l'IA et sur sa conscience. Traducción al español: example_title: Translation from French group: Spanish - text: ذات مرة ، عاش شبل الدب في الغابة example_title: Fairy tale group: Arabic - text: एक बार की बात है, जंगल में एक भालू का शावक रहता था example_title: Fairy tale group: Hindi - text: Il était une fois une licorne qui vivait example_title: Fairy tale group: French - text: |- Q: A juggler can juggle 16 balls. Half of the balls are golf balls, and half of the golf balls are blue. How many blue golf balls are there? A: Let's think step by step. example_title: Mathematical reasoning group: English co2_eq_emissions: emissions: 24_700_000 source: "Estimating the Carbon Footprint of BLOOM, a 176B Parameter Language Model. https://arxiv.org/abs/2211.02001" training_type: "pre-training" geographical_location: "Orsay, France" hardware_used: "384 A100 80GB GPUs" model-index: - name: bloom results: - task: type: text-generation dataset: type: openai_humaneval name: humaneval metrics: - name: pass@1 type: pass@1 value: 0.15542682926829265 verified: false - name: pass@10 type: pass@10 value: 0.3278356276947017 verified: false - name: pass@100 type: pass@100 value: 0.5719815685597749 verified: false --- <img src="https://s3.amazonaws.com/moonup/production/uploads/1657124309515-5f17f0a0925b9863e28ad517.png" alt="BigScience Logo" width="800" style="margin-left:'auto' margin-right:'auto' display:'block'"/> BigScience Large Open-science Open-access Multilingual Language Model Version 1.3 / 6 July 2022 Current Checkpoint: **Training Iteration 95000** Link to paper: [here](https://arxiv.org/abs/2211.05100) Total seen tokens: **366B** --- # Model Details BLOOM is an autoregressive Large Language Model (LLM), trained to continue text from a prompt on vast amounts of text data using industrial-scale computational resources. As such, it is able to output coherent text in 46 languages and 13 programming languages that is hardly distinguishable from text written by humans. BLOOM can also be instructed to perform text tasks it hasn't been explicitly trained for, by casting them as text generation tasks. 
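As a quick illustration of "casting tasks as text generation", the sketch below prompts the model through the `transformers` library. It is a hedged example rather than the card's official snippet (which appears only as a screenshot in the "How to use" section further down): the generation settings are arbitrary, and loading the full 176B checkpoint requires `accelerate` plus several hundred GB of memory, so in practice a smaller BLOOM variant is often substituted.

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

checkpoint = "bigscience/bloom"  # the full 176B model; smaller variants follow the same pattern

tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map="auto")  # needs `accelerate`

# One of the widget prompts from the metadata above, framed as plain text continuation.
prompt = "34+10=44 54+20="
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=10)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```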
## Basics *This section provides information about the model type, version, license, funders, release date, developers, and contact information.* *It is useful for anyone who wants to reference the model.* <details> <summary>Click to expand</summary> **Developed by:** BigScience ([website](https://bigscience.huggingface.co)) *All collaborators are either volunteers or have an agreement with their employer. (Further breakdown of participants forthcoming.)* **Model Type:** Transformer-based Language Model **Checkpoints format:** `transformers` (Megatron-DeepSpeed format available [here](https://huggingface.co/bigscience/bloom-optimizer-states)) **Version:** 1.0.0 **Languages:** Multiple; see [training data](#training-data) **License:** RAIL License v1.0 ([link](https://huggingface.co/spaces/bigscience/license) / [article and FAQ](https://bigscience.huggingface.co/blog/the-bigscience-rail-license)) **Release Date Estimate:** Monday, 11.July.2022 **Send Questions to:** [email protected] **Cite as:** BigScience, _BigScience Language Open-science Open-access Multilingual (BLOOM) Language Model_. International, May 2021-May 2022 **Funded by:** * The French government. * Hugging Face ([website](https://huggingface.co)). * Organizations of contributors. *(Further breakdown of organizations forthcoming.)* </details> ## Technical Specifications *This section includes details about the model objective and architecture, and the compute infrastructure.* *It is useful for people interested in model development.* <details> <summary>Click to expand</summary> Please see [the BLOOM training README](https://github.com/bigscience-workshop/bigscience/tree/master/train/tr11-176B-ml#readme) for full details on replicating training. ### Model Architecture and Objective * Modified from Megatron-LM GPT2 (see [paper](https://arxiv.org/abs/1909.08053), [BLOOM Megatron code](https://github.com/bigscience-workshop/Megatron-DeepSpeed)): * Decoder-only architecture * Layer normalization applied to word embeddings layer (`StableEmbedding`; see [code](https://github.com/facebookresearch/bitsandbytes), [paper](https://arxiv.org/pdf/2110.02861.pdf)) * ALiBI positional encodings (see [paper](https://arxiv.org/pdf/2108.12409.pdf)), with GeLU activation functions * 176,247,271,424 parameters: * 3,596,615,680 embedding parameters * 70 layers, 112 attention heads * Hidden layers are 14336-dimensional * Sequence length of 2048 tokens used (see [BLOOM tokenizer](https://huggingface.co/bigscience/tokenizer), [tokenizer description](#tokenization)) **Objective Function:** Cross Entropy with mean reduction (see [API documentation](https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html#torch.nn.CrossEntropyLoss)). ### Compute infrastructure Jean Zay Public Supercomputer, provided by the French government (see [announcement](https://www.enseignementsup-recherche.gouv.fr/fr/signature-du-marche-d-acquisition-de-l-un-des-supercalculateurs-les-plus-puissants-d-europe-46733)). 
#### Hardware * 384 A100 80GB GPUs (48 nodes) * Additional 32 A100 80GB GPUs (4 nodes) in reserve * 8 GPUs per node Using NVLink 4 inter-gpu connects, 4 OmniPath links * CPU: AMD * CPU memory: 512GB per node * GPU memory: 640GB per node * Inter-node connect: Omni-Path Architecture (OPA) * NCCL-communications network: a fully dedicated subnet * Disc IO network: shared network with other types of nodes #### Software * Megatron-DeepSpeed ([Github link](https://github.com/bigscience-workshop/Megatron-DeepSpeed)) * DeepSpeed ([Github link](https://github.com/microsoft/DeepSpeed)) * PyTorch (pytorch-1.11 w/ CUDA-11.5; see [Github link](https://github.com/pytorch/pytorch)) * apex ([Github link](https://github.com/NVIDIA/apex)) </details> --- # Training *This section provides information about the training data, the speed and size of training elements, and the environmental impact of training.* *It is useful for people who want to learn more about the model inputs and training footprint.* <details> <summary>Click to expand</summary> ## Training Data *This section provides a high-level overview of the training data. It is relevant for anyone who wants to know the basics of what the model is learning.* Details for each dataset are provided in individual [Data Cards](https://huggingface.co/spaces/bigscience/BigScienceCorpus), and the sizes of each of their contributions to the aggregated training data are presented in an [Interactive Corpus Map](https://huggingface.co/spaces/bigscience-catalogue-lm-data/corpus-map). Training data includes: - 46 natural languages - 13 programming languages - In 1.6TB of pre-processed text, converted into 350B unique tokens (see [the tokenizer section](#tokenization) for more.) ### Languages The pie chart shows the distribution of languages in training data. ![pie chart showing the distribution of languages in training data](https://github.com/bigscience-workshop/model_card/blob/main/assets/data/pie_v2.svg?raw=true) The following tables shows the further distribution of Niger-Congo & Indic languages and programming languages in the training data. Distribution of Niger Congo and Indic languages. | Niger Congo | Percentage | | Indic | Percentage | |----------------|------------| ------ |-----------|------------| | Chi Tumbuka | 0.00002 | | Assamese | 0.01 | | Kikuyu | 0.00004 | | Odia | 0.04 | | Bambara | 0.00004 | | Gujarati | 0.04 | | Akan | 0.00007 | | Marathi | 0.05 | | Xitsonga | 0.00007 | | Punjabi | 0.05 | | Sesotho | 0.00007 | | Kannada | 0.06 | | Chi Chewa | 0.0001 | | Nepali | 0.07 | | Setswana | 0.0002 | | Telugu | 0.09 | | Lingala | 0.0002 | | Malayalam | 0.10 | | Northern Sotho | 0.0002 | | Urdu | 0.10 | | Fon | 0.0002 | | Tamil | 0.20 | | Kirundi | 0.0003 | | Bengali | 0.50 | | Wolof | 0.0004 | | Hindi | 0.70 | | Luganda | 0.0004 | | Chi Shona | 0.001 | | Isi Zulu | 0.001 | | Igbo | 0.001 | | Xhosa | 0.001 | | Kinyarwanda | 0.003 | | Yoruba | 0.006 | | Swahili | 0.02 | Distribution of programming languages. 
| Extension | Language | Number of files | |----------------|------------|-----------------| | java | Java | 5,407,724 | | php | PHP | 4,942,186 | | cpp | C++ | 2,503,930 | | py | Python | 2,435,072 | | js | JavaScript | 1,905,518 | | cs | C# | 1,577,347 | | rb | Ruby | 6,78,413 | | cc | C++ | 443,054 | | hpp | C++ | 391,048 | | lua | Lua | 352,317 | | go | GO | 227,763 | | ts | TypeScript | 195,254 | | C | C | 134,537 | | scala | Scala | 92,052 | | hh | C++ | 67,161 | | H | C++ | 55,899 | | tsx | TypeScript | 33,107 | | rs | Rust | 29,693 | | phpt | PHP | 9,702 | | c++ | C++ | 1,342 | | h++ | C++ | 791 | | php3 | PHP | 540 | | phps | PHP | 270 | | php5 | PHP | 166 | | php4 | PHP | 29 | ### Preprocessing **Tokenization:** The BLOOM tokenizer ([link](https://huggingface.co/bigscience/tokenizer)), a learned subword tokenizer trained using: - A byte-level Byte Pair Encoding (BPE) algorithm - A simple pre-tokenization rule, no normalization - A vocabulary size of 250,680 It was trained on a subset of a preliminary version of the corpus using alpha-weighting per language. ## Speeds, Sizes, Times Training logs: [Tensorboard link](https://huggingface.co/tensorboard/bigscience/tr11-176B-ml-logs/) - Dates: - Started 11th March, 2022 11:42am PST - Estimated end: 5th July, 2022 - Checkpoint size: - Bf16 weights: 329GB - Full checkpoint with optimizer states: 2.3TB - Training throughput: About 150 TFLOP per GPU per second - Number of epochs: 1 - Estimated cost of training: Equivalent of $2-5M in cloud computing (including preliminary experiments) - Server training location: Île-de-France, France ## Environmental Impact The training supercomputer, Jean Zay ([website](http://www.idris.fr/eng/jean-zay/jean-zay-presentation-eng.html)), uses mostly nuclear energy. The heat generated by it is reused for heating campus housing. **Estimated carbon emissions:** *(Forthcoming.)* **Estimated electricity usage:** *(Forthcoming.)* </details> --- # Uses *This section addresses questions around how the model is intended to be used, discusses the foreseeable users of the model (including those affected by the model), and describes uses that are considered out of scope or misuse of the model.* *It is useful for anyone considering using the model or who is affected by the model.* <details> <summary>Click to expand</summary> ## How to use This model can be easily used and deployed using HuggingFace's ecosystem. This needs `transformers` and `accelerate` installed. The model can be downloaded as follows: <img src="https://s3.amazonaws.com/moonup/production/uploads/1657271608456-62441d1d9fdefb55a0b7d12c.png" width="800" style="margin-left:'auto' margin-right:'auto' display:'block'"/> ## Intended Use This model is being created in order to enable public research on large language models (LLMs). LLMs are intended to be used for language generation or as a pretrained base model that can be further fine-tuned for specific tasks. Use cases below are not exhaustive. ### Direct Use - Text generation - Exploring characteristics of language generated by a language model - Examples: Cloze tests, counterfactuals, generations with reframings ### Downstream Use - Tasks that leverage language models include: Information Extraction, Question Answering, Summarization ### Misuse and Out-of-scope Use *This section addresses what users ought not do with the model.* See the [BLOOM License](https://huggingface.co/spaces/bigscience/license), Attachment A, for detailed usage restrictions. 
The below list is non-exhaustive, but lists some easily foreseeable problematic use cases. #### Out-of-scope Uses Using the model in [high-stakes](#high-stakes) settings is out of scope for this model. The model is not designed for [critical decisions](#critical-decisions) nor uses with any material consequences on an individual's livelihood or wellbeing. The model outputs content that appears factual but may not be correct. Out-of-scope Uses Include: - Usage in biomedical domains, political and legal domains, or finance domains - Usage for evaluating or scoring individuals, such as for employment, education, or credit - Applying the model for critical automatic decisions, generating factual content, creating reliable summaries, or generating predictions that must be correct #### Misuse Intentionally using the model for harm, violating [human rights](#human-rights), or other kinds of malicious activities, is a misuse of this model. This includes: - Spam generation - Disinformation and influence operations - Disparagement and defamation - Harassment and abuse - [Deception](#deception) - Unconsented impersonation and imitation - Unconsented surveillance - Generating content without attribution to the model, as specified in the [RAIL License, Use Restrictions](https://huggingface.co/spaces/bigscience/license) ## Intended Users ### Direct Users - General Public - Researchers - Students - Educators - Engineers/developers - Non-commercial entities - Community advocates, including human and civil rights groups ### Indirect Users - Users of derivatives created by Direct Users, such as those using software with an [intended use](#intended-use) - Users of [Derivatives of the Model, as described in the License](https://huggingface.co/spaces/bigscience/license) ### Others Affected (Parties Prenantes) - People and groups referred to by the LLM - People and groups exposed to outputs of, or decisions based on, the LLM - People and groups whose original work is included in the LLM </details> --- # Risks and Limitations *This section identifies foreseeable harms and misunderstandings.* <details> <summary>Click to expand</summary> Model may: - Overrepresent some viewpoints and underrepresent others - Contain stereotypes - Contain [personal information](#personal-data-and-information) - Generate: - Hateful, abusive, or violent language - Discriminatory or prejudicial language - Content that may not be appropriate for all settings, including sexual content - Make errors, including producing incorrect information as if it were factual - Generate irrelevant or repetitive outputs - Induce users into attributing human traits to it, such as sentience or consciousness </details> --- # Evaluation *This section describes the evaluation protocols and provides the results.* <details> <summary>Click to expand</summary> ## Metrics *This section describes the different ways performance is calculated and why.* Includes: | Metric | Why chosen | |--------------------|--------------------------------------------------------------------| | [Perplexity](#perplexity) | Standard metric for quantifying model improvements during training | | Cross Entropy [Loss](#loss) | Standard objective for language models. | And multiple different metrics for specific tasks. _(More evaluation metrics forthcoming upon completion of evaluation protocol.)_ ## Factors *This section lists some different aspects of BLOOM models. 
Its focus is on aspects that are likely to give rise to high variance in model behavior.* - Language, such as English or Yoruba - Domain, such as newswire or stories - Demographic characteristics, such as gender or nationality ## Results *Results are based on the [Factors](#factors) and [Metrics](#metrics).* **Zero-shot evaluations:** <span style="color:red"><b>WARNING:</b> This section used to contain much more results, however they were not correct and we released without the approval of the evaluation working group. We are currently in the process of fixing the evaluations.</span> See this repository for JSON files: https://github.com/bigscience-workshop/evaluation-results | Task | Language | Metric | BLOOM-176B | OPT-175B* | |:--------|:-----------------|:------------------------|-------------:|------------:| | humaneval | python | pass@1 ↑ | 0.155 | 0.0 | | humaneval | python | pass@10 ↑ | 0.328 | 0.0 | | humaneval | python | pass@100 ↑ | 0.572 | 0.003 | **Train-time Evaluation:** Final checkpoint after 95K steps: - Training Loss: 1.939 - Validation Loss: 2.061 - Perplexity: 7.045 For more see: https://huggingface.co/bigscience/tr11-176B-ml-logs </details> --- # Recommendations *This section provides information on warnings and potential mitigations.* <details> <summary>Click to expand</summary> - Indirect users should be made aware when the content they're working with is created by the LLM. - Users should be aware of [Risks and Limitations](#risks-and-limitations), and include an appropriate age disclaimer or blocking interface as necessary. - Models trained or finetuned downstream of BLOOM LM should include an updated Model Card. - Users of the model should provide mechanisms for those affected to provide feedback, such as an email address for comments. </details> --- # Glossary and Calculations *This section defines common terms and how metrics are calculated.* <details> <summary>Click to expand</summary> - <a name="loss">**Loss:**</a> A calculation of the difference between what the model has learned and what the data shows ("groundtruth"). The lower the loss, the better. The training process aims to minimize the loss. - <a name="perplexity">**Perplexity:**</a> This is based on what the model estimates the probability of new data is. The lower the perplexity, the better. If the model is 100% correct at predicting the next token it will see, then the perplexity is 1. Mathematically this is calculated using entropy. - <a name="high-stakes">**High-stakes settings:**</a> Such as those identified as "high-risk AI systems" and "unacceptable risk AI systems" in the European Union's proposed [Artificial Intelligence (AI) Act](https://artificialintelligenceact.eu/annexes/). - <a name="critical-decisions">**Critical decisions:**</a> Such as those defined in [the United States' proposed Algorithmic Accountability Act](https://www.congress.gov/117/bills/s3572/BILLS-117s3572is.pdf). - <a name="human-rights">**Human rights:**</a> Includes those rights defined in the [Universal Declaration of Human Rights](https://www.un.org/sites/un2.un.org/files/2021/03/udhr.pdf). 
- <a name="personal-data-and-information">**Personal Data and Personal Information:**</a> Personal data and information is defined in multiple data protection regulations, such as "[personal data](https://gdpr-info.eu/issues/personal-data/)" in the [European Union's General Data Protection Regulation](https://gdpr-info.eu); and "personal information" in the Republic of South Africa's [Protection of Personal Information Act](https://www.gov.za/sites/default/files/gcis_document/201409/3706726-11act4of2013popi.pdf), The People's Republic of China's [Personal information protection law](http://en.npc.gov.cn.cdurl.cn/2021-12/29/c_694559.htm). - <a name="sensitive-characteristics">**Sensitive characteristics:**</a> This includes specifically protected categories in human rights (see [UHDR, Article 2](https://www.un.org/sites/un2.un.org/files/2021/03/udhr.pdf)) and personal information regulation (see GDPR, [Article 9; Protection of Personal Information Act, Chapter 1](https://www.gov.za/sites/default/files/gcis_document/201409/3706726-11act4of2013popi.pdf)) - <a name="deception">**Deception:**</a> Doing something to intentionally mislead individuals to believe something that is false, such as by creating deadbots or chatbots on social media posing as real people, or generating text documents without making consumers aware that the text is machine generated. </details> --- # More Information *This section provides links to writing on dataset creation, technical specifications, lessons learned, and initial results.* <details> <summary>Click to expand</summary> ## Intermediate checkpoints For academic (or any) usage, we published the intermediate checkpoints, corresponding to the model state at each 5000 steps. Please follow [this link](https://huggingface.co/bigscience/bloom-176-intermediate) to get these checkpoints. ## Dataset Creation Blog post detailing the design choices during the dataset creation: https://bigscience.huggingface.co/blog/building-a-tb-scale-multilingual-dataset-for-language-modeling ## Technical Specifications Blog post summarizing how the architecture, size, shape, and pre-training duration where selected: https://bigscience.huggingface.co/blog/what-language-model-to-train-if-you-have-two-million-gpu-hours More details on the architecture/optimizer: https://github.com/bigscience-workshop/bigscience/tree/master/train/tr11-176B-ml Blog post on the hardware/engineering side: https://bigscience.huggingface.co/blog/which-hardware-to-train-a-176b-parameters-model Details on the distributed setup used for the training: https://github.com/bigscience-workshop/bigscience/tree/master/train/tr11-176B-ml Tensorboard updated during the training: https://huggingface.co/bigscience/tr11-176B-ml-logs/tensorboard#scalars&tagFilter=loss ## Lessons Insights on how to approach training, negative results: https://github.com/bigscience-workshop/bigscience/blob/master/train/lessons-learned.md Details on the obstacles overcome during the preparation on the engineering side (instabilities, optimization of training throughput, so many technical tricks and questions): https://github.com/bigscience-workshop/bigscience/blob/master/train/tr11-176B-ml/chronicles.md ## Initial Results Initial prompting experiments using interim checkpoints: https://huggingface.co/spaces/bigscience/bloom-book </details> ## Original checkpoints The checkpoints in this repo correspond to the HuggingFace Transformers format. 
If you want to use our fork of [Megatron-DeepSpeed](https://github.com/bigscience-workshop/Megatron-DeepSpeed) that the model was trained with, use [this repo](https://huggingface.co/bigscience/bloom-optimizer-states) instead. Many intermediate checkpoints are available at https://huggingface.co/bigscience/bloom-intermediate/ --- # Model Card Authors *Ordered roughly chronologically and by amount of time spent on creating this model card.* Margaret Mitchell, Giada Pistilli, Yacine Jernite, Ezinwanne Ozoani, Marissa Gerchick, Nazneen Rajani, Sasha Luccioni, Irene Solaiman, Maraim Masoud, Somaieh Nikpoor, Carlos Muñoz Ferrandis, Stas Bekman, Christopher Akiki, Danish Contractor, David Lansky, Angelina McMillan-Major, Tristan Thrush, Suzana Ilić, Gérard Dupont, Shayne Longpre, Manan Dey, Stella Biderman, Douwe Kiela, Emi Baylor, Teven Le Scao, Aaron Gokaslan, Julien Launay, Niklas Muennighoff
DeskDown/MarianMix_en-zh_to_vi-ms-hi-ja
[ "pytorch", "tensorboard", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - clinc_oos metrics: - accuracy model-index: - name: distilbert-base-uncased-finetuned-clinc results: - task: name: Text Classification type: text-classification dataset: name: clinc_oos type: clinc_oos args: plus metrics: - name: Accuracy type: accuracy value: 0.9161290322580645 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-clinc This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the clinc_oos dataset. It achieves the following results on the evaluation set: - Loss: 0.7755 - Accuracy: 0.9161 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 48 - eval_batch_size: 48 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 4.2893 | 1.0 | 318 | 3.2831 | 0.7403 | | 2.629 | 2.0 | 636 | 1.8731 | 0.8348 | | 1.5481 | 3.0 | 954 | 1.1581 | 0.8906 | | 1.0137 | 4.0 | 1272 | 0.8585 | 0.9077 | | 0.797 | 5.0 | 1590 | 0.7755 | 0.9161 | ### Framework versions - Transformers 4.11.3 - Pytorch 1.11.0 - Datasets 1.16.1 - Tokenizers 0.10.3
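The hyperparameters listed above map directly onto a Hugging Face `Trainer` setup. The sketch below is a hedged reconstruction, not the author's actual script: the label count (151) and the `clinc_oos` column names (`text`, `intent`) are assumptions about the "plus" configuration, and no metric computation is wired in.

```python
from datasets import load_dataset
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          Trainer, TrainingArguments)

clinc = load_dataset("clinc_oos", "plus")
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")

def tokenize(batch):
    return tokenizer(batch["text"], truncation=True)

encoded = clinc.map(tokenize, batched=True)
encoded = encoded.rename_column("intent", "labels")  # Trainer expects a "labels" column

model = AutoModelForSequenceClassification.from_pretrained(
    "distilbert-base-uncased", num_labels=151)  # assumed number of intents in "plus"

args = TrainingArguments(
    output_dir="distilbert-base-uncased-finetuned-clinc",
    learning_rate=2e-5,               # values taken from the table above
    per_device_train_batch_size=48,
    per_device_eval_batch_size=48,
    num_train_epochs=5,
    lr_scheduler_type="linear",
    seed=42,
    evaluation_strategy="epoch",
)

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=encoded["train"],
    eval_dataset=encoded["validation"],
    tokenizer=tokenizer,              # enables dynamic padding via the default collator
)
trainer.train()
```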
DevsIA/imagenes
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-05-19T13:02:01Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - clinc_oos metrics: - accuracy model-index: - name: distilbert-base-uncased-distilled-clinc results: - task: name: Text Classification type: text-classification dataset: name: clinc_oos type: clinc_oos args: plus metrics: - name: Accuracy type: accuracy value: 0.9409677419354838 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-distilled-clinc This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the clinc_oos dataset. It achieves the following results on the evaluation set: - Loss: 0.1004 - Accuracy: 0.9410 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 48 - eval_batch_size: 48 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.9037 | 1.0 | 318 | 0.5745 | 0.7326 | | 0.4486 | 2.0 | 636 | 0.2866 | 0.8819 | | 0.2537 | 3.0 | 954 | 0.1794 | 0.9210 | | 0.1762 | 4.0 | 1272 | 0.1387 | 0.9294 | | 0.1419 | 5.0 | 1590 | 0.1210 | 0.9358 | | 0.1247 | 6.0 | 1908 | 0.1119 | 0.9413 | | 0.1138 | 7.0 | 2226 | 0.1067 | 0.9387 | | 0.1078 | 8.0 | 2544 | 0.1026 | 0.9423 | | 0.1043 | 9.0 | 2862 | 0.1010 | 0.9413 | | 0.102 | 10.0 | 3180 | 0.1004 | 0.9410 | ### Framework versions - Transformers 4.11.3 - Pytorch 1.11.0 - Datasets 1.16.1 - Tokenizers 0.10.3
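This card reports the distilled model's metrics but not the distillation objective itself. For orientation, a common formulation — stated here as an assumption, not as the recipe actually used — blends the hard-label cross entropy with a temperature-softened KL term against a teacher's logits:

```python
import torch.nn.functional as F

def distillation_loss(student_logits, teacher_logits, labels,
                      temperature=2.0, alpha=0.5):
    """Hypothetical knowledge-distillation objective; temperature and alpha are
    illustrative values, not taken from this card."""
    hard = F.cross_entropy(student_logits, labels)          # usual supervised term
    soft = F.kl_div(
        F.log_softmax(student_logits / temperature, dim=-1),
        F.softmax(teacher_logits / temperature, dim=-1),
        reduction="batchmean",
    ) * temperature ** 2                                     # rescale softened gradients
    return alpha * hard + (1.0 - alpha) * soft
```

In such a setup, a larger classifier already fine-tuned on `clinc_oos` would act as the teacher and this DistilBERT model as the student, with the listed hyperparameters otherwise unchanged.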
DewiBrynJones/wav2vec2-large-xlsr-welsh
[ "cy", "dataset:common_voice", "audio", "automatic-speech-recognition", "speech", "xlsr-fine-tuning-week", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: en tags: - scibert - token-classification - medical-domain metrics: - f1 - precision - recall dataset: - Mathking/primary_outcomes widget: - text: "The FIRST primary outcome is pain at 12 months as measured by the VAS. The primary analysis is to assess whether surgical correction " example_title: "PubMed Article : Methods section" --- # SciBERT Finetuned for Primary Outcomes Extraction in Biomedical Text ## Model description SciBERT (Scivocab Uncased version) finetuned on the dataset from A. Koroleva. This model is trained to detect primary outcomes (token classification) in biomedical articles. ## Intended uses & limitations This model was trained to detect primary outcomes in medical articles. ## Training data [Dataset from A. Koroleva](https://zenodo.org/record/3234811#.YnodlVzP2EI) ## Evaluation results Here are the results for the different models evaluated. Evaluation is done using 10-fold cross validation with the 10 splits originally defined in the dataset.
| | model | f1 | precision | recall |
|---:|:-------------------------|---------:|------------:|---------:|
| 1 | biobert-v1.1 | 0.873347 | 0.870358 | 0.878657 |
| 2 | scibert_scivocab_uncased | 0.870242 | 0.863077 | 0.879617 |
| 3 | biobert_v1.0_pubmed_pmc | 0.863499 | 0.858515 | 0.8713 |
| 4 | scibert_scivocab_cased | 0.85848 | 0.859122 | 0.862749 |
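For inference, a token-classification pipeline is the most direct route. Below is a hedged sketch: the repository id is a placeholder, since this dump does not make the exact checkpoint name clear, and the text is the widget example from the metadata above.

```python
from transformers import pipeline

# Placeholder repo id — replace with the actual fine-tuned checkpoint.
ner = pipeline(
    "token-classification",
    model="your-username/scibert-primary-outcomes",
    aggregation_strategy="simple",   # merge word pieces into whole spans
)

text = ("The FIRST primary outcome is pain at 12 months as measured by the VAS. "
        "The primary analysis is to assess whether surgical correction ...")
for span in ner(text):
    print(span["entity_group"], span["word"], round(span["score"], 3))
```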
DheerajPranav/Dialo-GPT-Rick-bot
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-05-19T13:06:45Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 289.34 +/- 23.86 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
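Until the TODO above is filled in, a generic, hedged sketch of loading and running a Stable-Baselines3 PPO agent from the Hub looks as follows; the repository id and filename are placeholders, not values taken from this card, and the snippet assumes the older `gym` API that SB3 used at the time.

```python
import gym
from huggingface_sb3 import load_from_hub   # pip install huggingface-sb3
from stable_baselines3 import PPO

# Placeholder repo id / filename — replace with the actual checkpoint location.
checkpoint = load_from_hub(repo_id="<user>/ppo-LunarLander-v2",
                           filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)

env = gym.make("LunarLander-v2")
obs = env.reset()                  # old gym API: reset() returns only the observation
for _ in range(1000):
    action, _states = model.predict(obs, deterministic=True)
    obs, reward, done, info = env.step(action)
    if done:
        obs = env.reset()
env.close()
```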
Dhritam/Zova-bot
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-05-19T13:22:49Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 265.29 +/- 18.80 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Dhruva/Interstellar
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: fairseq task: text-to-speech tags: - fairseq - audio - text-to-speech language: en datasets: - ljspeech widget: - text: "Hello, this is a test run." example_title: "Hello, this is a test run." --- # fastspeech2-en-ljspeech [FastSpeech 2](https://arxiv.org/abs/2006.04558) text-to-speech model from fairseq S^2 ([paper](https://arxiv.org/abs/2109.06912)/[code](https://github.com/pytorch/fairseq/tree/main/examples/speech_synthesis)): - English - Single-speaker female voice - Trained on [LJSpeech](https://keithito.com/LJ-Speech-Dataset/) ## Usage ```python from fairseq.checkpoint_utils import load_model_ensemble_and_task_from_hf_hub from fairseq.models.text_to_speech.hub_interface import TTSHubInterface import IPython.display as ipd models, cfg, task = load_model_ensemble_and_task_from_hf_hub( "facebook/fastspeech2-en-ljspeech", arg_overrides={"vocoder": "hifigan", "fp16": False} ) model = models[0] TTSHubInterface.update_cfg_with_data_cfg(cfg, task.data_cfg) generator = task.build_generator(model, cfg) text = "Hello, this is a test run." sample = TTSHubInterface.get_model_input(task, text) wav, rate = TTSHubInterface.get_prediction(task, model, generator, sample) ipd.Audio(wav, rate=rate) ``` See also [fairseq S^2 example](https://github.com/pytorch/fairseq/blob/main/examples/speech_synthesis/docs/ljspeech_example.md). ## Citation ```bibtex @inproceedings{wang-etal-2021-fairseq, title = "fairseq S{\^{}}2: A Scalable and Integrable Speech Synthesis Toolkit", author = "Wang, Changhan and Hsu, Wei-Ning and Adi, Yossi and Polyak, Adam and Lee, Ann and Chen, Peng-Jen and Gu, Jiatao and Pino, Juan", booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", month = nov, year = "2021", address = "Online and Punta Cana, Dominican Republic", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2021.emnlp-demo.17", doi = "10.18653/v1/2021.emnlp-demo.17", pages = "143--152", } ```
DicoTiar/wisdomfiy
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
2022-05-19T13:36:45Z
swadeshi_hindiwav2vec2asr/ is a Hindi speech recognition model that is a fine-tuned version of the theainerd/Wav2Vec2-large-xlsr-hindi model. The model achieved a word error rate (WER) of 0.738 when trained on 12 hours of MUCS data for 30 epochs with a batch size of 12.
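A minimal inference sketch, assuming the checkpoint is published in the standard Wav2Vec2 format; the model path is a hypothetical placeholder, and `librosa` is used only to load a 16 kHz audio file.

```python
import torch
import librosa
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

# Hypothetical path -- substitute the actual Hub id or local directory of this fine-tuned checkpoint.
model_path = "<hub-user>/swadeshi_hindiwav2vec2asr"
processor = Wav2Vec2Processor.from_pretrained(model_path)
model = Wav2Vec2ForCTC.from_pretrained(model_path)

# Load 16 kHz mono audio, run the acoustic model, and greedily decode the CTC output.
speech, _ = librosa.load("hindi_sample.wav", sr=16_000)
inputs = processor(speech, sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
    logits = model(**inputs).logits
predicted_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(predicted_ids)[0])
```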
DingleyMaillotUrgell/homer-bot
[ "pytorch", "gpt2", "text-generation", "en", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
Note: this recipe is trained with the code from this PR https://github.com/k2-fsa/icefall/pull/349

# Pre-trained Transducer-Stateless2 models for the WenetSpeech dataset with icefall.

The model was trained on the L subset of WenetSpeech with the scripts in [icefall](https://github.com/k2-fsa/icefall), based on the latest version of k2.

## Training procedure

The main repositories are listed below; the training and decoding scripts will be updated as new versions are released.

k2: https://github.com/k2-fsa/k2

icefall: https://github.com/k2-fsa/icefall

lhotse: https://github.com/lhotse-speech/lhotse

* Install k2 and lhotse. The k2 installation guide is at https://k2.readthedocs.io/en/latest/installation/index.html, and the lhotse guide is at https://lhotse.readthedocs.io/en/latest/getting-started.html#installation. The latest versions should be fine. Please also install the requirements listed in icefall.
* Clone icefall (https://github.com/k2-fsa/icefall) and check out the commit shown above.
```
git clone https://github.com/k2-fsa/icefall
cd icefall
```
* Prepare the data.
```
cd egs/wenetspeech/ASR
bash ./prepare.sh
```
* Training
```
export CUDA_VISIBLE_DEVICES="0,1,2,3,4,5,6,7"
./pruned_transducer_stateless2/train.py \
    --world-size 8 \
    --num-epochs 15 \
    --start-epoch 0 \
    --exp-dir pruned_transducer_stateless2/exp \
    --lang-dir data/lang_char \
    --max-duration 180 \
    --valid-interval 3000 \
    --model-warm-step 3000 \
    --save-every-n 8000 \
    --training-subset L
```

## Evaluation results

The decoding results (WER%) on WenetSpeech (dev, test-net and test-meeting) are listed below; these results were obtained by averaging the models from epochs 9 and 10. The WERs are:

| | dev | test-net | test-meeting | comment |
|------------------------------------|-------|----------|--------------|------------------------------------------|
| greedy search | 7.80 | 8.75 | 13.49 | --epoch 10, --avg 2, --max-duration 100 |
| modified beam search (beam size 4) | 7.76 | 8.71 | 13.41 | --epoch 10, --avg 2, --max-duration 100 |
| fast beam search (1best) | 7.94 | 8.74 | 13.80 | --epoch 10, --avg 2, --max-duration 1500 |
| fast beam search (nbest) | 9.82 | 10.98 | 16.37 | --epoch 10, --avg 2, --max-duration 600 |
| fast beam search (nbest oracle) | 6.88 | 7.18 | 11.77 | --epoch 10, --avg 2, --max-duration 600 |
| fast beam search (nbest LG) | 14.94 | 16.14 | 22.93 | --epoch 10, --avg 2, --max-duration 600 |
Doquey/DialoGPT-small-Luisbot1
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2022-05-19T17:40:36Z
--- license: other tags: - generated_from_trainer - opt - custom-license - no-commercial - email - auto-complete datasets: - aeslc widget: - text: "Hey <NAME>,\n\nThank you for signing up for my weekly newsletter. Before we get started, you'll have to confirm your email address." example_title: "newsletter" - text: "Hi <NAME>,\n\nI hope this email finds you well. Let me start by saying that I am a big fan of your work." example_title: "fan" - text: "Greetings <NAME>,\n\nI hope you had a splendid evening at the Company sausage eating festival. I am reaching out because" example_title: "festival" - text: "Good Morning <NAME>,\n\nI was just thinking to myself about how much I love creating value" example_title: "value" - text: "URGENT - I need" example_title: "URGENT" inference: parameters: min_length: 4 max_length: 64 length_penalty: 0.7 no_repeat_ngram_size: 3 do_sample: False num_beams: 4 early_stopping: True repetition_penalty: 3.5 --- # opt for email generation - 350M > If you like the idea of wasting less time on emails, further work on this topic can be found [on this hf org page](https://huggingface.co/postbot) Why write the rest of your email when you can generate it? ```python from transformers import pipeline model_tag = "pszemraj/opt-350m-email-generation" generator = pipeline( 'text-generation', model=model_tag, use_fast=False, do_sample=False, early_stopping=True, ) prompt = """ Hello, Following up on the bubblegum shipment.""" generator( prompt, max_length=64, ) # generate ``` - [Link to notebook](https://colab.research.google.com/gist/pszemraj/40c46deed730bfca553b8c4b257a7b77/email-autocomplete-demo.ipynb) on Colab > For this model, formatting matters. The results may be (significantly) different between the structure outlined above and `prompt = "Hey, just wanted to ..."` etc. ## Model description - This model is a fine-tuned version of [facebook/opt-350m](https://huggingface.co/facebook/opt-350m) on the [aeslc](https://huggingface.co/datasets/aeslc) dataset for six epochs. - Emails, phone numbers, etc., were attempted to be excluded in a dataset preparation step using [clean-text](https://pypi.org/project/clean-text/) in Python. - Note that API is restricted to generating 64 tokens - you can generate longer emails by using this in a text-generation `pipeline` object ## Intended uses & limitations - in their everlasting wisdom, Facebook/Meta has decided to make a custom license for this, specifying several things. See [facebook/opt-350m](https://huggingface.co/facebook/opt-350m) for details. ## Training and evaluation data - the `email_body` field of train + validation (get more data) from the [aeslc](https://huggingface.co/datasets/aeslc) dataset. ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 6e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - distributed_type: multi-GPU - gradient_accumulation_steps: 16 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.03 - num_epochs: 6 ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Tokenizers 0.12.1
Doxophobia/DialoGPT-medium-celeste
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
2022-05-19T17:54:04Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy - f1 model-index: - name: distilbert-base-uncased-finetuned-query results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-query This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.3668 - Accuracy: 0.8936 - F1: 0.8924 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 5 - eval_batch_size: 5 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.6511 | 1.0 | 30 | 0.5878 | 0.7234 | 0.6985 | | 0.499 | 2.0 | 60 | 0.4520 | 0.8723 | 0.8683 | | 0.3169 | 3.0 | 90 | 0.3668 | 0.8936 | 0.8924 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
albert-xlarge-v1
[ "pytorch", "tf", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
341
2022-05-19T20:50:04Z
--- library_name: stable-baselines3 tags: - Pendulum-v1 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - metrics: - type: mean_reward value: -141.19 +/- 122.27 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: Pendulum-v1 type: Pendulum-v1 --- # **A2C** Agent playing **Pendulum-v1** This is a trained model of a **A2C** agent playing **Pendulum-v1** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) ```python from huggingface_sb3 import load_from_hub from stable_baselines3 import A2C from stable_baselines3.common.env_util import make_vec_env from stable_baselines3.common.evaluation import evaluate_policy from stable_baselines3.common.vec_env import VecNormalize # Download checkpoint and stats env_id = "Pendulum-v1" checkpoint = load_from_hub(f"araffin/a2c-{env_id}", f"a2c-{env_id}.zip") vec_normalize_stats = load_from_hub(f"araffin/a2c-{env_id}", f"vec_normalize.pkl") # Load the model model = A2C.load(checkpoint) env = make_vec_env(env_id, n_envs=1) env = VecNormalize.load(vec_normalize_stats, env) # do not update them at test time env.training = False # reward normalization is not needed at test time env.norm_reward = False # Evaluate print("Evaluating model") mean_reward, std_reward = evaluate_policy( model, env, n_eval_episodes=20, deterministic=True, ) print(f"Mean reward = {mean_reward:.2f} +/- {std_reward:.2f}") # Start a new episode obs = env.reset() try: while True: action, _states = model.predict(obs, deterministic=True) obs, rewards, dones, info = env.step(action) env.render() except KeyboardInterrupt: pass ``` ## Training Code ```python from huggingface_sb3 import package_to_hub from stable_baselines3 import A2C from stable_baselines3.common.env_util import make_vec_env from stable_baselines3.common.vec_env import VecNormalize, sync_envs_normalization # Create the environment env_id = "Pendulum-v1" env = make_vec_env(env_id, n_envs=8) # Normalize env = VecNormalize(env, gamma=0.9) # Create the evaluation env (could be used in `EvalCallback`) eval_env = make_vec_env(env_id, n_envs=1) eval_env = VecNormalize(eval_env, gamma=0.9, training=False, norm_reward=False) # Instantiate the agent model = A2C( "MlpPolicy", env, n_steps=8, gamma=0.9, gae_lambda=0.9, use_sde=True, policy_kwargs=dict(log_std_init=-2), verbose=1, ) # Train the agent try: model.learn(total_timesteps=int(1e6)) except KeyboardInterrupt: pass # Synchronize stats (done automatically in `EvalCallback`) sync_envs_normalization(env, eval_env) package_to_hub( model=model, model_name=f"a2c-{env_id}", model_architecture="A2C", env_id=env_id, eval_env=eval_env, repo_id=f"araffin/a2c-{env_id}", commit_message="Initial commit", ) ```
albert-xlarge-v2
[ "pytorch", "tf", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2,973
2022-05-19T20:59:29Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - conll2003 metrics: - precision - recall - f1 - accuracy model-index: - name: bert-finetuned-ner results: - task: name: Token Classification type: token-classification dataset: name: conll2003 type: conll2003 args: conll2003 metrics: - name: Precision type: precision value: 0.9348582794629537 - name: Recall type: recall value: 0.9491753618310333 - name: F1 type: f1 value: 0.9419624217118998 - name: Accuracy type: accuracy value: 0.9854889032789781 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-finetuned-ner This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the conll2003 dataset. It achieves the following results on the evaluation set: - Loss: 0.0619 - Precision: 0.9349 - Recall: 0.9492 - F1: 0.9420 - Accuracy: 0.9855 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.088 | 1.0 | 1756 | 0.0654 | 0.9144 | 0.9403 | 0.9271 | 0.9831 | | 0.0395 | 2.0 | 3512 | 0.0605 | 0.9274 | 0.9482 | 0.9377 | 0.9851 | | 0.0213 | 3.0 | 5268 | 0.0619 | 0.9349 | 0.9492 | 0.9420 | 0.9855 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
albert-xxlarge-v1
[ "pytorch", "tf", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7,091
2022-05-19T21:12:23Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 193.96 +/- 43.39 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
bert-base-cased-finetuned-mrpc
[ "pytorch", "tf", "jax", "bert", "fill-mask", "transformers", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11,644
2022-05-19T21:25:04Z
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: distilbert-base-uncased-finetuned-homedepot results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-homedepot This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.2826 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 2.9909 | 1.0 | 4688 | 2.5285 | | 2.5495 | 2.0 | 9376 | 2.3476 | | 2.4198 | 3.0 | 14064 | 2.2841 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.1 - Tokenizers 0.12.1
bert-base-cased
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "exbert", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8,621,271
2022-05-19T21:58:14Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imdb metrics: - accuracy - f1 model-index: - name: sentiment-analysis-model-for-socialmedia results: - task: name: Text Classification type: text-classification dataset: name: imdb type: imdb args: plain_text metrics: - name: Accuracy type: accuracy value: 0.9297083333333334 - name: F1 type: f1 value: 0.9298923658729169 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # sentiment-analysis-model-for-socialmedia This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 0.2368 - Accuracy: 0.9297 - F1: 0.9299 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.1 - Tokenizers 0.12.1
bert-base-chinese
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "zh", "arxiv:1810.04805", "transformers", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3,377,486
2022-05-19T22:07:03Z
--- tags: - generated_from_trainer datasets: - scitldr model-index: - name: pegasus-scitldr results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # pegasus-scitldr This model is a fine-tuned version of [google/pegasus-large](https://huggingface.co/google/pegasus-large) on the scitldr dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - gradient_accumulation_steps: 16 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 4 ### Training results ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.1 - Tokenizers 0.12.1
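The card above does not include an inference example; here is a minimal summarization sketch with the `transformers` pipeline. The model path is a hypothetical placeholder for wherever this fine-tuned checkpoint is published.

```python
from transformers import pipeline

# Hypothetical path -- substitute the Hub id or local directory of the fine-tuned pegasus-scitldr checkpoint.
summarizer = pipeline("summarization", model="<hub-user>/pegasus-scitldr")

abstract = (
    "We study one-sentence summarization of scientific papers and fine-tune a "
    "pretrained encoder-decoder model on the SciTLDR dataset of expert-written TLDRs."
)
print(summarizer(abstract, max_length=64, min_length=8, do_sample=False)[0]["summary_text"])
```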
bert-base-german-cased
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "de", "transformers", "exbert", "license:mit", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
175,983
2022-05-19T22:36:14Z
--- library_name: stable-baselines3 tags: - CartPole-v1 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 --- # **PPO** Agent playing **CartPole-v1** This is a trained model of a **PPO** agent playing **CartPole-v1** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo ppo --env CartPole-v1 -orga sb3 -f logs/ python enjoy.py --algo ppo --env CartPole-v1 -f logs/ ``` ## Training (with the RL Zoo) ``` python train.py --algo ppo --env CartPole-v1 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo ppo --env CartPole-v1 -f logs/ -orga sb3 ``` ## Hyperparameters ```python OrderedDict([('batch_size', 256), ('clip_range', 'lin_0.2'), ('ent_coef', 0.0), ('gae_lambda', 0.8), ('gamma', 0.98), ('learning_rate', 'lin_0.001'), ('n_envs', 8), ('n_epochs', 20), ('n_steps', 32), ('n_timesteps', 100000.0), ('policy', 'MlpPolicy'), ('normalize', False)]) ```
bert-base-german-dbmdz-uncased
[ "pytorch", "jax", "safetensors", "bert", "fill-mask", "de", "transformers", "license:mit", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
68,305
2022-05-19T23:01:14Z
A facebook/opt-125m model trained on SQuAD for extractive question answering. To use the model, format the input in the following manner: "(Context Text)\nQuestion:(Question Text)\nAnswer:"
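A minimal sketch of that prompt format with the `transformers` text-generation pipeline; the model path is a hypothetical placeholder, since the card does not state the checkpoint's Hub id.

```python
from transformers import pipeline

# Hypothetical path -- substitute the actual Hub id of the SQuAD-finetuned OPT-125m checkpoint.
generator = pipeline("text-generation", model="<hub-user>/opt-125m-squad")

context = "The Eiffel Tower was completed in 1889 and stands in Paris."
question = "When was the Eiffel Tower completed?"
prompt = f"{context}\nQuestion:{question}\nAnswer:"

# Greedy decoding; the extracted answer is whatever the model appends after "Answer:".
full_text = generator(prompt, max_new_tokens=16, do_sample=False)[0]["generated_text"]
print(full_text[len(prompt):].strip())
```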
bert-base-multilingual-cased
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "multilingual", "af", "sq", "ar", "an", "hy", "ast", "az", "ba", "eu", "bar", "be", "bn", "inc", "bs", "br", "bg", "my", "ca", "ceb", "ce", "zh", "cv", "hr", "cs", "da", "nl", "en", "et", "fi", "fr", "gl", "ka", "de", "el", "gu", "ht", "he", "hi", "hu", "is", "io", "id", "ga", "it", "ja", "jv", "kn", "kk", "ky", "ko", "la", "lv", "lt", "roa", "nds", "lm", "mk", "mg", "ms", "ml", "mr", "mn", "min", "ne", "new", "nb", "nn", "oc", "fa", "pms", "pl", "pt", "pa", "ro", "ru", "sco", "sr", "scn", "sk", "sl", "aze", "es", "su", "sw", "sv", "tl", "tg", "th", "ta", "tt", "te", "tr", "uk", "ud", "uz", "vi", "vo", "war", "cy", "fry", "pnb", "yo", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4,749,504
2022-05-19T23:08:31Z
--- library_name: stable-baselines3 tags: - MountainCar-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - metrics: - type: mean_reward value: -103.40 +/- 7.49 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: MountainCar-v0 type: MountainCar-v0 --- # **DQN** Agent playing **MountainCar-v0** This is a trained model of a **DQN** agent playing **MountainCar-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env MountainCar-v0 -orga sb3 -f logs/ python enjoy.py --algo dqn --env MountainCar-v0 -f logs/ ``` ## Training (with the RL Zoo) ``` python train.py --algo dqn --env MountainCar-v0 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env MountainCar-v0 -f logs/ -orga sb3 ``` ## Hyperparameters ```python OrderedDict([('batch_size', 128), ('buffer_size', 10000), ('exploration_final_eps', 0.07), ('exploration_fraction', 0.2), ('gamma', 0.98), ('gradient_steps', 8), ('learning_rate', 0.004), ('learning_starts', 1000), ('n_timesteps', 120000.0), ('policy', 'MlpPolicy'), ('policy_kwargs', 'dict(net_arch=[256, 256])'), ('target_update_interval', 600), ('train_freq', 16), ('normalize', False)]) ```
bert-large-cased-whole-word-masking
[ "pytorch", "tf", "jax", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2,316
2022-05-20T00:20:50Z
---
tags:
- conversational
---

# mawaidhaChatbot Model
bert-large-cased
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
388,769
null
--- license: apache-2.0 tags: - translation - generated_from_trainer metrics: - bleu model-index: - name: en_nso_ukuxhumana_model results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # en_nso_ukuxhumana_model This model is a fine-tuned version of [Helsinki-NLP/opus-mt-en-nso](https://huggingface.co/Helsinki-NLP/opus-mt-en-nso) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.8482 - Bleu (before training): 12.2324 - Bleu: 18.9287 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results ### Framework versions - Transformers 4.16.2 - Pytorch 1.10.2 - Datasets 1.18.3 - Tokenizers 0.11.0
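No usage example is given above; a minimal translation sketch with the `transformers` pipeline follows. The model path is a hypothetical placeholder for wherever this fine-tuned en-to-nso checkpoint lives.

```python
from transformers import pipeline

# Hypothetical path -- substitute the Hub id or local directory of the fine-tuned en-to-nso checkpoint.
translator = pipeline("translation", model="<hub-user>/en_nso_ukuxhumana_model")

print(translator("Good morning, how are you today?", max_length=64)[0]["translation_text"])
```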
bert-large-uncased-whole-word-masking-finetuned-squad
[ "pytorch", "tf", "jax", "safetensors", "bert", "question-answering", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
480,510
2022-05-20T00:57:19Z
--- library_name: stable-baselines3 tags: - FrozenLake-v1 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 0.78 +/- 0.42 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1 type: FrozenLake-v1 --- # **PPO** Agent playing **FrozenLake-v1** This is a trained model of a **PPO** agent playing **FrozenLake-v1** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
camembert-base
[ "pytorch", "tf", "safetensors", "camembert", "fill-mask", "fr", "dataset:oscar", "arxiv:1911.03894", "transformers", "license:mit", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "CamembertForMaskedLM" ], "model_type": "camembert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,440,898
2022-05-20T01:34:52Z
--- language: en thumbnail: http://www.huggingtweets.com/vgdunkey/1658553242358/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/676614171849453568/AZd1Bh-s_400x400.png&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">dunkey</div> <div style="text-align: center; font-size: 14px;">@vgdunkey</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from dunkey. | Data | dunkey | | --- | --- | | Tweets downloaded | 1283 | | Retweets | 147 | | Short tweets | 327 | | Tweets kept | 809 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/bri0i7s5/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @vgdunkey's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/o4oh6dvl) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/o4oh6dvl/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/vgdunkey') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
openai-gpt
[ "pytorch", "tf", "rust", "safetensors", "openai-gpt", "text-generation", "en", "arxiv:1705.11168", "arxiv:1803.02324", "arxiv:1910.09700", "transformers", "license:mit", "has_space" ]
text-generation
{ "architectures": [ "OpenAIGPTLMHeadModel" ], "model_type": "openai-gpt", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
65,432
2022-05-20T05:27:45Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 219.13 +/- 23.38 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
A-bhimany-u08/bert-base-cased-qqp
[ "pytorch", "bert", "text-classification", "dataset:qqp", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
138
2022-05-20T16:35:43Z
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-Taxi-v3 results: - metrics: - type: mean_reward value: 7.56 +/- 2.71 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3** . ## Usage ```python model = load_from_hub(repo_id="pm390/q-Taxi-v3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]) ```
Aarbor/xlm-roberta-base-finetuned-marc-en
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-05-21T09:01:03Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 280.46 +/- 18.03 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
AdapterHub/roberta-base-pf-newsqa
[ "roberta", "en", "dataset:newsqa", "arxiv:2104.08247", "adapter-transformers", "question-answering" ]
question-answering
{ "architectures": null, "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-8x8-slippery results: - metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1** . ## Usage ```python model = load_from_hub(repo_id="DBusAI/q-FrozenLake-v1-8x8-slippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]) ```
AetherIT/DialoGPT-small-Hal
[ "conversational" ]
conversational
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - FrozenLake-v1-4x4 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-slippery-v3 results: - metrics: - type: mean_reward value: 0.81 +/- 0.39 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4 type: FrozenLake-v1-4x4 --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1** . ## Usage ```python model = load_from_hub(repo_id="DBusAI/q-FrozenLake-v1-4x4-slippery-v3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]) ```
Alexander-Learn/bert-finetuned-ner
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4 results: - metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1** . ## Usage ```python model = load_from_hub(repo_id="rmn0ff/q-FrozenLake-v1-4x4", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]) ```
Allybaby21/Allysai
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
## This model belongs to the Styleformer project [Please refer to github page](https://github.com/PrithivirajDamodaran/Styleformer)
AmirHussein/test
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - generated_from_trainer model-index: - name: ar-adapter-32 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # ar-adapter-32 This model was trained from scratch on the None dataset. It achieves the following results on the evaluation set: - Loss: 5.3886 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 8 - total_train_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 5 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 352 | 5.6861 | | 5.7356 | 2.0 | 704 | 5.5388 | | 5.5308 | 3.0 | 1056 | 5.4493 | | 5.5308 | 4.0 | 1408 | 5.4030 | | 5.4304 | 5.0 | 1760 | 5.3886 | ### Framework versions - Transformers 4.17.0 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
AnonymousSub/AR_EManuals-BERT
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-Taxi-v3 results: - metrics: - type: mean_reward value: 7.56 +/- 2.71 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3** . ## Usage ```python model = load_from_hub(repo_id="NeutralBlaster/q-Taxi-v3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]) ```
AnonymousSub/SR_rule_based_hier_quadruplet_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 149.42 +/- 111.62 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
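The usage section of the PPO card above is left as a TODO. As a minimal sketch only, loading and evaluating such an SB3 PPO checkpoint from the Hub typically looks like the following; the `repo_id` and `filename` are hypothetical placeholders, since the card does not state them.

```python
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

# Hypothetical repo_id and filename -- replace with the actual values for this checkpoint.
checkpoint = load_from_hub(repo_id="user/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)

# Evaluate the loaded policy for a few episodes.
env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10, deterministic=True)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```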
AnonymousSub/rule_based_hier_triplet_epochs_1_shard_1_wikiqa
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
28
null
--- language: cs widget: - text: "Umělá inteligence pomůže lidstvu překonat budoucí" example_title: "Umělá inteligence ..." - text: "Současný pokrok v oblasti umělých neuronových sítí představuje" example_title: "Současný pokrok ..." - text: "Z hlediska obecné teorie relativity" example_title: "Z hlediska ..." - text: "Vědci objevili šokující nález - stádo jednorožců žijící v odlehlém, dosud neprobádaném údolí v Andách. Ještě větším překvapením pro vědce byla skutečnost, že jednorožci mluvili" example_title: "Vědci objevili ..." license: cc-by-sa-4.0 tags: - text-generation - transformers - pytorch - gpt2 datasets: - wikipedia --- # GPT2-small-czech-cs: a Language Model for Czech text generation (and more NLP tasks ...) ## Introduction GPT2-small-czech-cs is a first experimental model for the Czech language based on the GPT-2 small model. It was trained on Czech Wikipedia using **Transfer Learning and Fine-tuning techniques** in about a weekend on one NVIDIA GTX 1080 Ti GPU and with about 1GB of training data (cswiki). A training server with a couple of GPUs for experiments and one RTX 3080 Ti was generously provided by [ONYX engineering, spol. s r.o.](http://www.onyx.cz/). This experiment is a proof-of-concept that it is possible to get a state-of-the-art language model in any language with low resources. It was fine-tuned from the [English pre-trained GPT-2 small](https://huggingface.co/gpt2) using the Hugging Face libraries (Transformers and Tokenizers) wrapped into the [fastai2](https://dev.fast.ai/) Deep Learning framework. All the fine-tuning fastai v2 techniques were used. This work was inspired by [Faster than training from scratch — Fine-tuning the English GPT-2 in any language with Hugging Face and fastai v2 (practical case with Portuguese)](https://medium.com/@pierre_guillou/faster-than-training-from-scratch-fine-tuning-the-english-gpt-2-in-any-language-with-hugging-f2ec05c98787), citation below. It is now available on Hugging Face under [gpt2-small-czech-cs](https://huggingface.co/spital/gpt2-small-czech-cs). We release it under the [CC BY SA 4.0 license](https://creativecommons.org/licenses/by-sa/4.0/) (i.e. allowing commercial use). For further information or requests, please post a GitHub issue at [Github - gpt2-small-czech-cs](https://github.com/spital/gpt2-small-czech-cs). ## Model description *Note: information copied/pasted from [Model: gpt2 >> Model description](https://huggingface.co/gpt2#model-description)* GPT-2 is a transformers model pretrained on a very large corpus of English data in a self-supervised fashion. This means it was pretrained on the raw texts only, with no humans labelling them in any way (which is why it can use lots of publicly available data) with an automatic process to generate inputs and labels from those texts. More precisely, it was trained to guess the next word in sentences. More precisely, inputs are sequences of continuous text of a certain length and the targets are the same sequence, shifted one token (word or piece of word) to the right. The model uses internally a mask-mechanism to make sure the predictions for the token `i` only uses the inputs from `1` to `i` but not the future tokens. This way, the model learns an inner representation of the English language that can then be used to extract features useful for downstream tasks. The model is best at what it was pretrained for however, which is generating texts from a prompt.
## How to use GPT2-small-czech-cs with HuggingFace (PyTorch) ### Load the model and its sub-word tokenizer (Byte-level BPE) ```python from transformers import GPT2Tokenizer, GPT2LMHeadModel pretrained = 'spital/gpt2-small-czech-cs' # a local directory or huggingface model name tokenizer = GPT2Tokenizer.from_pretrained(pretrained) model = GPT2LMHeadModel.from_pretrained(pretrained, pad_token_id=tokenizer.eos_token_id) # Sequence length max is 1024 tokenizer.model_max_length = 1024 # disable dropout (or leave in train mode to finetune) model.eval() ``` ### Generate one word ```python import torch # input sequence text = "Umělá inteligence pomůže lidstvu překonat budoucí" inp_tokens = tokenizer(text, return_tensors="pt") # model output outputs = model(**inp_tokens, labels=inp_tokens["input_ids"]) loss, logits = outputs[:2] predicted_index = torch.argmax(logits[0, -1, :]).item() predicted_text = tokenizer.decode([predicted_index]) # results print('input text:', text) print('predicted text:', predicted_text) # predicted text: problémy ``` ### Generate a few full sequences ```python text = "Umělá inteligence pomůže lidstvu překonat budoucí" encoded = tokenizer.encode(text, return_tensors='pt') # torch.random.manual_seed(0) # if you need reproducibility sample_outputs = model.generate(encoded, do_sample=True, max_length=encoded.size()[1]+20, no_repeat_ngram_size=2, top_p=0.95, top_k=50, temperature=0.65, num_return_sequences=3) for i, sample_output in enumerate(sample_outputs): print("{}: {}\n".format(i, tokenizer.decode(sample_output, skip_special_tokens=True))) ``` ## Limitations and bias The training data used for this model comes from a Czech Wikipedia dump. We know it contains a lot of unfiltered content from the internet, which is far from neutral. As the OpenAI team themselves point out in their model card: > Because large-scale language models like GPT-2 do not distinguish fact from fiction, we don't support use-cases that require the generated text to be true. Additionally, language models like GPT-2 reflect the biases inherent to the systems they were trained on, so we do not recommend that they be deployed into systems that interact with humans > unless the deployers first carry out a study of biases relevant to the intended use-case. We found no statistically significant difference in gender, race, and religious bias probes between 774M and 1.5B, implying all versions of GPT-2 should be approached with similar levels of caution around use cases that are sensitive to biases around human attributes. ## Author Czech GPT-2 small was trained and evaluated by [Jiri Spitalsky](https://www.linkedin.com/in/jiri-spitalsky-09400a2) thanks to the computing power of the GPUs and other hardware generously provided by [ONYX engineering, spol. s r.o.](http://www.onyx.cz/). ## Citation My special thanks go to Pierre Guillou for his work **GPorTuguese-2 (Portuguese GPT-2 small): a Language Model for Portuguese text generation (and more NLP tasks...)**; my work would not have been possible without it.
AnonymousSub/rule_based_roberta_hier_triplet_0.1_epochs_1_shard_1
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- library_name: stable-baselines3 tags: - Pendulum-v1 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: TQC results: - metrics: - type: mean_reward value: -171.32 +/- 96.54 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: Pendulum-v1 type: Pendulum-v1 --- # **TQC** Agent playing **Pendulum-v1** This is a trained model of a **TQC** agent playing **Pendulum-v1** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo tqc --env Pendulum-v1 -orga sb3 -f logs/ python enjoy.py --algo tqc --env Pendulum-v1 -f logs/ ``` ## Training (with the RL Zoo) ``` python train.py --algo tqc --env Pendulum-v1 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo tqc --env Pendulum-v1 -f logs/ -orga sb3 ``` ## Hyperparameters ```python OrderedDict([('learning_rate', 0.001), ('n_timesteps', 20000), ('policy', 'MlpPolicy'), ('normalize', False)]) ```
AnonymousSub/rule_based_roberta_hier_triplet_0.1_epochs_1_shard_1_squad2.0
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- tags: - translation metrics: - bleu model-index: - name: mbart50-finetuned-multi30-en-to-de results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mbart50-finetuned-multi30-en-to-de This model is a fine-tuned version of [facebook/mbart-large-50-one-to-many-mmt](https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.5946 - Bleu: 48.2650 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.13.0 - Pytorch 1.10.0+cu113 - Datasets 2.2.2 - Tokenizers 0.10.3
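The card above reports BLEU but shows no inference code. Below is a minimal sketch of English-to-German generation with an mBART-50 one-to-many checkpoint like this one; the checkpoint path is a placeholder, since the card does not list the repository it was pushed to.

```python
from transformers import MBart50TokenizerFast, MBartForConditionalGeneration

# Placeholder path -- point this at the fine-tuned checkpoint described above.
checkpoint = "path/to/mbart50-finetuned-multi30-en-to-de"
tokenizer = MBart50TokenizerFast.from_pretrained(checkpoint)
model = MBartForConditionalGeneration.from_pretrained(checkpoint)

tokenizer.src_lang = "en_XX"  # source sentences are English
inputs = tokenizer("A man in a blue shirt is standing on a ladder.", return_tensors="pt")

# Force the decoder to start with the German language code, as mBART-50 expects.
generated = model.generate(**inputs, forced_bos_token_id=tokenizer.lang_code_to_id["de_DE"], max_length=64)
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])
```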
AnonymousSub/rule_based_roberta_hier_triplet_epochs_1_shard_1_wikiqa_copy
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 230.42 +/- 83.51 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
AnonymousSub/rule_based_roberta_only_classfn_epochs_1_shard_1
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
This model, DeLADE+[CLS], is trained by fusing neural lexical and semantic components in a single transformer, using DistilBERT as the backbone. It is described in *[A Dense Representation Framework for Lexical and Semantic Matching](https://arxiv.org/pdf/2112.04666.pdf)* by Sheng-Chieh Lin and Jimmy Lin. You can find the usage of the model in our [DHR repo](https://github.com/jacklin64/DHR): (1) [Inference on MSMARCO Passage Ranking](https://github.com/castorini/DHR/blob/main/docs/msmarco-passage-train-eval.md); (2) [Inference on BEIR datasets](https://github.com/castorini/DHR/blob/main/docs/beir-eval.md).
AnonymousSub/rule_based_roberta_only_classfn_epochs_1_shard_1_wikiqa
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
--- language: - cs - cs tags: - abstractive summarization - mbart-cc25 - Czech license: apache-2.0 datasets: - private CNC dataset news-based metrics: - rouge - rougeraw --- # mBART fine-tuned model for Czech abstractive summarization (HT2A-C) This model is a fine-tuned checkpoint of [facebook/mbart-large-cc25](https://huggingface.co/facebook/mbart-large-cc25) on the Czech news dataset to produce Czech abstractive summaries. ## Task The model deals with the task ``Headline + Text to Abstract`` (HT2A) which consists in generating a multi-sentence summary considered as an abstract from a Czech news text. ## Dataset The model has been trained on the private CNC dataset provided by Czech News Center. The dataset includes 3/4M Czech news-based documents consisting of a Headline, Abstract, and Full-text sections. Truncation and padding were set to 512 tokens for the encoder and 128 for the decoder. ## Training The model has been trained on 1x NVIDIA Tesla A100 40GB for 60 hours. During training, the model has seen 3712K documents corresponding to roughly 5.5 epochs. # Use Assuming you are using the provided Summarizer.ipynb file. ```python def summ_config(): cfg = OrderedDict([ # summarization model - checkpoint from website ("model_name", "krotima1/mbart-ht2a-c"), ("inference_cfg", OrderedDict([ ("num_beams", 4), ("top_k", 40), ("top_p", 0.92), ("do_sample", True), ("temperature", 0.89), ("repetition_penalty", 1.2), ("no_repeat_ngram_size", None), ("early_stopping", True), ("max_length", 128), ("min_length", 10), ])), #texts to summarize ("text", [ "Input your Czech text", ] ), ]) return cfg cfg = summ_config() #load model model = AutoModelForSeq2SeqLM.from_pretrained(cfg["model_name"]) tokenizer = AutoTokenizer.from_pretrained(cfg["model_name"]) # init summarizer summarize = Summarizer(model, tokenizer, cfg["inference_cfg"]) summarize(cfg["text"]) ```
AnonymousSub/rule_based_roberta_only_classfn_twostage_epochs_1_shard_1
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- license: mit tags: - generated_from_trainer datasets: - adversarial_qa model-index: - name: deberta-base-finetuned-squad1-aqa results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # deberta-base-finetuned-squad1-aqa This model is a fine-tuned version of [stevemobs/deberta-base-finetuned-squad1](https://huggingface.co/stevemobs/deberta-base-finetuned-squad1) on the adversarial_qa dataset. It achieves the following results on the evaluation set: - Loss: 1.5912 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 12 - eval_batch_size: 12 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 1.9115 | 1.0 | 2527 | 1.5572 | | 1.3429 | 2.0 | 5054 | 1.5912 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
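The card above only documents training. For illustration, a minimal extractive question-answering sketch with this kind of checkpoint is shown below; the model identifier is an assumption based on the card's name and base model, so adjust it to the actual repository.

```python
from transformers import pipeline

# Assumed identifier derived from the card name -- replace if the repository differs.
qa = pipeline("question-answering", model="stevemobs/deberta-base-finetuned-squad1-aqa")

context = (
    "The Amazon rainforest covers much of the Amazon basin of South America. "
    "The majority of the forest is contained within Brazil."
)
result = qa(question="Which country contains most of the Amazon rainforest?", context=context)
print(result["answer"], round(result["score"], 3))
```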
AnonymousSub/rule_based_roberta_twostagetriplet_epochs_1_shard_1_wikiqa
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
24
null
--- language: en tags: - science - multi-disciplinary license: apache-2.0 --- # ScholarBERT_100 Model This is the **ScholarBERT_100** variant of the ScholarBERT model family. The model is pretrained on a large collection of scientific research articles (**221B tokens**). This is a **cased** (case-sensitive) model. The tokenizer will not convert all inputs to lower-case by default. The model is based on the same architecture as [BERT-large](https://huggingface.co/bert-large-cased) and has a total of 340M parameters. # Model Architecture | Hyperparameter | Value | |-----------------|:-------:| | Layers | 24 | | Hidden Size | 1024 | | Attention Heads | 16 | | Total Parameters | 340M | # Training Dataset The vocab and the model are pretrained on **100% of the PRD** scientific literature dataset. The PRD dataset is provided by Public.Resource.Org, Inc. (“Public Resource”), a nonprofit organization based in California. This dataset was constructed from a corpus of journal article files, from which we successfully extracted text from 75,496,055 articles across 178,928 journals. The articles span Arts & Humanities, Life Sciences & Biomedicine, Physical Sciences, Social Sciences, and Technology. The distribution of articles is shown below. ![corpus pie chart](corpus_pie_chart.png) # BibTeX entry and citation info If using this model, please cite this paper: ``` @misc{hong2022scholarbert, doi = {10.48550/ARXIV.2205.11342}, url = {https://arxiv.org/abs/2205.11342}, author = {Hong, Zhi and Ajith, Aswathy and Pauloski, Gregory and Duede, Eamon and Malamud, Carl and Magoulas, Roger and Chard, Kyle and Foster, Ian}, title = {ScholarBERT: Bigger is Not Always Better}, publisher = {arXiv}, year = {2022} } ```
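The ScholarBERT_100 card describes the architecture and pretraining data but gives no loading example. Below is a minimal masked-language-modeling sketch for a cased BERT-style checkpoint such as this one; the `globuslabs/ScholarBERT` identifier is inferred from the image URLs used elsewhere in this model family and should be treated as an assumption.

```python
import torch
from transformers import AutoModelForMaskedLM, AutoTokenizer

# Assumed repo id for this variant -- adjust to the actual checkpoint location.
checkpoint = "globuslabs/ScholarBERT"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForMaskedLM.from_pretrained(checkpoint)
model.eval()

text = f"The catalyst significantly increased the {tokenizer.mask_token} of the reaction."
inputs = tokenizer(text, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

# Top-5 predictions for the masked position.
mask_positions = (inputs["input_ids"] == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
top_ids = logits[0, mask_positions[0]].topk(5).indices
print(tokenizer.convert_ids_to_tokens(top_ids.tolist()))
```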
AnonymousSub/rule_based_roberta_twostagetriplet_hier_epochs_1_shard_1
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- language: en tags: - science - multi-disciplinary license: apache-2.0 --- # ScholarBERT-XL_100 Model This is the **ScholarBERT-XL_100** variant of the ScholarBERT model family. The model is pretrained on a large collection of scientific research articles (**221B tokens**). This is a **cased** (case-sensitive) model. The tokenizer will not convert all inputs to lower-case by default. The model has a total of 770M parameters. # Model Architecture | Hyperparameter | Value | |-----------------|:-------:| | Layers | 36 | | Hidden Size | 1280 | | Attention Heads | 20 | | Total Parameters | 770M | # Training Dataset The vocab and the model are pretrained on **100% of the PRD** scientific literature dataset. The PRD dataset is provided by Public.Resource.Org, Inc. (“Public Resource”), a nonprofit organization based in California. This dataset was constructed from a corpus of journal article files, from which we successfully extracted text from 75,496,055 articles across 178,928 journals. The articles span Arts & Humanities, Life Sciences & Biomedicine, Physical Sciences, Social Sciences, and Technology. The distribution of articles is shown below. ![corpus pie chart](https://huggingface.co/globuslabs/ScholarBERT/resolve/main/corpus_pie_chart.png) # BibTeX entry and citation info If using this model, please cite this paper: ``` @misc{hong2022scholarbert, doi = {10.48550/ARXIV.2205.11342}, url = {https://arxiv.org/abs/2205.11342}, author = {Hong, Zhi and Ajith, Aswathy and Pauloski, Gregory and Duede, Eamon and Malamud, Carl and Magoulas, Roger and Chard, Kyle and Foster, Ian}, title = {ScholarBERT: Bigger is Not Always Better}, publisher = {arXiv}, year = {2022} } ```
AnonymousSub/rule_based_roberta_twostagetriplet_hier_epochs_1_shard_10
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- license: mit tags: - generated_from_trainer model-index: - name: deberta-base-combined-squad1-aqa results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # deberta-base-combined-squad1-aqa This model is a fine-tuned version of [microsoft/deberta-base](https://huggingface.co/microsoft/deberta-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.9442 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 12 - eval_batch_size: 12 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 1.1133 | 1.0 | 9906 | 0.9652 | | 0.7943 | 2.0 | 19812 | 0.9442 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
AnonymousSub/rule_based_twostagetriplet_hier_epochs_1_shard_1_wikiqa
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
--- language: en tags: - science - multi-disciplinary license: apache-2.0 --- # ScholarBERT_100_WB Model This is the **ScholarBERT_100_WB** variant of the ScholarBERT model family. The model is pretrained on a large collection of scientific research articles (**221B tokens**). Additionally, the pretraining data includes the Wikipedia+BookCorpus, which are used to pretrain the [BERT-base](https://huggingface.co/bert-base-cased) and [BERT-large](https://huggingface.co/bert-large-cased) models. This is a **cased** (case-sensitive) model. The tokenizer will not convert all inputs to lower-case by default. The model is based on the same architecture as [BERT-large](https://huggingface.co/bert-large-cased) and has a total of 340M parameters. # Model Architecture | Hyperparameter | Value | |-----------------|:-------:| | Layers | 24 | | Hidden Size | 1024 | | Attention Heads | 16 | | Total Parameters | 340M | # Training Dataset The vocab and the model are pretrained on **100% of the PRD** scientific literature dataset and the Wikipedia+BookCorpus. The PRD dataset is provided by Public.Resource.Org, Inc. (“Public Resource”), a nonprofit organization based in California. This dataset was constructed from a corpus of journal article files, from which we successfully extracted text from 75,496,055 articles across 178,928 journals. The articles span Arts & Humanities, Life Sciences & Biomedicine, Physical Sciences, Social Sciences, and Technology. The distribution of articles is shown below. ![corpus pie chart](https://huggingface.co/globuslabs/ScholarBERT/resolve/main/corpus_pie_chart.png) # BibTeX entry and citation info If using this model, please cite this paper: ``` @misc{hong2022scholarbert, doi = {10.48550/ARXIV.2205.11342}, url = {https://arxiv.org/abs/2205.11342}, author = {Hong, Zhi and Ajith, Aswathy and Pauloski, Gregory and Duede, Eamon and Malamud, Carl and Magoulas, Roger and Chard, Kyle and Foster, Ian}, title = {ScholarBERT: Bigger is Not Always Better}, publisher = {arXiv}, year = {2022} } ```
AnonymousSub/specter-bert-model_copy_wikiqa
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
26
null
--- language: en tags: - science - multi-disciplinary license: apache-2.0 --- # ScholarBERT_10_WB Model This is the **ScholarBERT_10_WB** variant of the ScholarBERT model family. The model is pretrained on a large collection of scientific research articles (**22.1B tokens**). Additionally, the pretraining data includes the Wikipedia+BookCorpus, which are used to pretrain the [BERT-base](https://huggingface.co/bert-base-cased) and [BERT-large](https://huggingface.co/bert-large-cased) models. This is a **cased** (case-sensitive) model. The tokenizer will not convert all inputs to lower-case by default. The model is based on the same architecture as [BERT-large](https://huggingface.co/bert-large-cased) and has a total of 340M parameters. # Model Architecture | Hyperparameter | Value | |-----------------|:-------:| | Layers | 24 | | Hidden Size | 1024 | | Attention Heads | 16 | | Total Parameters | 340M | # Training Dataset The vocab and the model are pretrained on **10% of the PRD** scientific literature dataset and Wikipedia+BookCorpus. The PRD dataset is provided by Public.Resource.Org, Inc. (“Public Resource”), a nonprofit organization based in California. This dataset was constructed from a corpus of journal article files, from which we successfully extracted text from 75,496,055 articles across 178,928 journals. The articles span Arts & Humanities, Life Sciences & Biomedicine, Physical Sciences, Social Sciences, and Technology. The distribution of articles is shown below. ![corpus pie chart](https://huggingface.co/globuslabs/ScholarBERT/resolve/main/corpus_pie_chart.png) # BibTeX entry and citation info If using this model, please cite this paper: ``` @misc{hong2022scholarbert, doi = {10.48550/ARXIV.2205.11342}, url = {https://arxiv.org/abs/2205.11342}, author = {Hong, Zhi and Ajith, Aswathy and Pauloski, Gregory and Duede, Eamon and Malamud, Carl and Magoulas, Roger and Chard, Kyle and Foster, Ian}, title = {ScholarBERT: Bigger is Not Always Better}, publisher = {arXiv}, year = {2022} } ```
AnonymousSub/unsup-consert-base_squad2.0
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- language: en tags: - science - multi-disciplinary license: apache-2.0 --- # ScholarBERT-XL_1 Model This is the **ScholarBERT-XL_1** variant of the ScholarBERT model family. The model is pretrained on a large collection of scientific research articles (**2.2B tokens**). This is a **cased** (case-sensitive) model. The tokenizer will not convert all inputs to lower-case by default. The model has a total of 770M parameters. # Model Architecture | Hyperparameter | Value | |-----------------|:-------:| | Layers | 36 | | Hidden Size | 1280 | | Attention Heads | 20 | | Total Parameters | 770M | # Training Dataset The vocab and the model are pretrained on **1% of the PRD** scientific literature dataset. The PRD dataset is provided by Public.Resource.Org, Inc. (“Public Resource”), a nonprofit organization based in California. This dataset was constructed from a corpus of journal article files, from which we successfully extracted text from 75,496,055 articles across 178,928 journals. The articles span Arts & Humanities, Life Sciences & Biomedicine, Physical Sciences, Social Sciences, and Technology. The distribution of articles is shown below. ![corpus pie chart](https://huggingface.co/globuslabs/ScholarBERT/resolve/main/corpus_pie_chart.png) # BibTeX entry and citation info If using this model, please cite this paper: ``` @misc{hong2022scholarbert, doi = {10.48550/ARXIV.2205.11342}, url = {https://arxiv.org/abs/2205.11342}, author = {Hong, Zhi and Ajith, Aswathy and Pauloski, Gregory and Duede, Eamon and Malamud, Carl and Magoulas, Roger and Chard, Kyle and Foster, Ian}, title = {ScholarBERT: Bigger is Not Always Better}, publisher = {arXiv}, year = {2022} } ```
AnonymousSub/unsup-consert-emanuals
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- language: - en datasets: - amazon_reviews_multi tags: - summarization license: apache-2.0 --- T5-base model for text summarization, fine-tuned on a subset of Amazon reviews in English. ## ROUGE scores - ROUGE-1: 0.5019 - ROUGE-2: 0.4226 - ROUGE-L: 0.4877 - ROUGE-Lsum: 0.4877
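Since the card does not include an inference example, here is a minimal summarization sketch for a T5-base checkpoint fine-tuned as described. The checkpoint path is a placeholder, and the `summarize:` prefix follows the standard T5 convention rather than anything this card specifies.

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Placeholder path -- replace with the actual fine-tuned checkpoint.
checkpoint = "path/to/t5-base-amazon-review-summarizer"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

review = (
    "I bought these headphones last month. The sound quality is excellent and the battery "
    "lasts for days, but the ear cushions started to peel after a few weeks of daily use."
)
inputs = tokenizer("summarize: " + review, return_tensors="pt", truncation=True, max_length=512)
summary_ids = model.generate(**inputs, max_length=48, num_beams=4, early_stopping=True)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```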
AnonymousSub/unsup-consert-papers-bert
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- language: - cs - cs tags: - abstractive summarization - mbart-cc25 - Czech license: apache-2.0 datasets: - SumeCzech dataset news-based metrics: - rouge - rougeraw --- # mBART fine-tuned model for Czech abstractive summarization (HT2A-S) This model is a fine-tuned checkpoint of [facebook/mbart-large-cc25](https://huggingface.co/facebook/mbart-large-cc25) on the Czech news dataset to produce Czech abstractive summaries. ## Task The model deals with the task ``Headline + Text to Abstract`` (HT2A) which consists in generating a multi-sentence summary considered as an abstract from a Czech news text. ## Dataset The model has been trained on the [SumeCzech](https://ufal.mff.cuni.cz/sumeczech) dataset. The dataset includes around 1M Czech news-based documents consisting of a Headline, Abstract, and Full-text sections. Truncation and padding were configured for 512 tokens for the encoder and 128 for the decoder. ## Training The model has been trained on 1x NVIDIA Tesla A100 40GB for 20 hours, 1x NVIDIA Tesla V100 32GB for 40 hours, and 4x NVIDIA Tesla A100 40GB for 20 hours. During training, the model has seen 6928K documents corresponding to roughly 8 epochs. # Use Assuming you are using the provided Summarizer.ipynb file. ```python def summ_config(): cfg = OrderedDict([ # summarization model - checkpoint from website ("model_name", "krotima1/mbart-ht2a-s"), ("inference_cfg", OrderedDict([ ("num_beams", 4), ("top_k", 40), ("top_p", 0.92), ("do_sample", True), ("temperature", 0.89), ("repetition_penalty", 1.2), ("no_repeat_ngram_size", None), ("early_stopping", True), ("max_length", 128), ("min_length", 10), ])), #texts to summarize ("text", [ "Input your Czech text", ] ), ]) return cfg cfg = summ_config() #load model model = AutoModelForSeq2SeqLM.from_pretrained(cfg["model_name"]) tokenizer = AutoTokenizer.from_pretrained(cfg["model_name"]) # init summarizer summarize = Summarizer(model, tokenizer, cfg["inference_cfg"]) summarize(cfg["text"]) ```
Anonymreign/savagebeta
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - cs - cs tags: - abstractive summarization - mbart-cc25 - Czech license: apache-2.0 datasets: - SumeCzech dataset news-based metrics: - rouge - rougeraw --- # mBART fine-tuned model for Czech abstractive summarization (AT2H-S) This model is a fine-tuned checkpoint of [facebook/mbart-large-cc25](https://huggingface.co/facebook/mbart-large-cc25) on the Czech news dataset to produce Czech abstractive summaries. ## Task The model deals with the task ``Abstract + Text to Headline`` (AT2H) which consists in generating a one- or two-sentence summary considered as a headline from a Czech news text. ## Dataset The model has been trained on the [SumeCzech](https://ufal.mff.cuni.cz/sumeczech) dataset. The dataset includes around 1M Czech news-based documents consisting of a Headline, Abstract, and Full-text sections. Truncation and padding were configured for 512 tokens for the encoder and 64 for the decoder. ## Training The model has been trained on 1x NVIDIA Tesla A100 40GB for 40 hours. During training, the model has seen 2576K documents corresponding to roughly 3 epochs. # Use Assuming you are using the provided Summarizer.ipynb file. ```python def summ_config(): cfg = OrderedDict([ # summarization model - checkpoint from website ("model_name", "krotima1/mbart-at2h-s"), ("inference_cfg", OrderedDict([ ("num_beams", 4), ("top_k", 40), ("top_p", 0.92), ("do_sample", True), ("temperature", 0.89), ("repetition_penalty", 1.2), ("no_repeat_ngram_size", None), ("early_stopping", True), ("max_length", 64), ("min_length", 10), ])), #texts to summarize ("text", [ "Input your Czech text", ] ), ]) return cfg cfg = summ_config() #load model model = AutoModelForSeq2SeqLM.from_pretrained(cfg["model_name"]) tokenizer = AutoTokenizer.from_pretrained(cfg["model_name"]) # init summarizer summarize = Summarizer(model, tokenizer, cfg["inference_cfg"]) summarize(cfg["text"]) ```
AnthonyNelson/DialoGPT-small-ricksanchez
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: wav2vec2-6 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-6 This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 5.2459 - Wer: 1.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.003 - train_batch_size: 32 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 400 - num_epochs: 30 ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:---:| | 4.5873 | 1.56 | 200 | 5.4586 | 1.0 | | 4.1846 | 3.12 | 400 | 5.2278 | 1.0 | | 4.1711 | 4.69 | 600 | 5.3131 | 1.0 | | 4.1581 | 6.25 | 800 | 5.2558 | 1.0 | | 4.1275 | 7.81 | 1000 | 5.2556 | 1.0 | | 4.1452 | 9.38 | 1200 | 5.2637 | 1.0 | | 4.1614 | 10.94 | 1400 | 5.2847 | 1.0 | | 4.1667 | 12.5 | 1600 | 5.2349 | 1.0 | | 4.1471 | 14.06 | 1800 | 5.2850 | 1.0 | | 4.1268 | 15.62 | 2000 | 5.2510 | 1.0 | | 4.1701 | 17.19 | 2200 | 5.2605 | 1.0 | | 4.1459 | 18.75 | 2400 | 5.2493 | 1.0 | | 4.1411 | 20.31 | 2600 | 5.2649 | 1.0 | | 4.1351 | 21.88 | 2800 | 5.2541 | 1.0 | | 4.1442 | 23.44 | 3000 | 5.2459 | 1.0 | | 4.1805 | 25.0 | 3200 | 5.2232 | 1.0 | | 4.1262 | 26.56 | 3400 | 5.2384 | 1.0 | | 4.145 | 28.12 | 3600 | 5.2522 | 1.0 | | 4.142 | 29.69 | 3800 | 5.2459 | 1.0 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
Anthos23/my-awesome-model
[ "pytorch", "tf", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
null
Work in progress. <br> A fine-tuned model for abstractive summarization is coming soon. <br>
AntonClaesson/movie-plot-generator
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- language: - cs - cs tags: - abstractive summarization - mbart-cc25 - Czech license: apache-2.0 datasets: - private Czech News Center dataset news-based - SumeCzech dataset news-based metrics: - rouge - rougeraw --- # mBART fine-tuned model for Czech abstractive summarization (AT2H-CS) This model is a fine-tuned checkpoint of [facebook/mbart-large-cc25](https://huggingface.co/facebook/mbart-large-cc25) on the Czech news dataset to produce Czech abstractive summaries. ## Task The model deals with the task ``Abstract + Text to Headline`` (AT2H) which consists in generating a one- or two-sentence summary considered as a headline from a Czech news text. ## Dataset The model has been trained on a large Czech news dataset developed by a concatenation of two datasets, the private CNC dataset provided by Czech News Center and [SumeCzech](https://ufal.mff.cuni.cz/sumeczech) dataset. The dataset includes around 1.75M Czech news-based documents consisting of a Headline, Abstract, and Full-text sections. Truncation and padding were set to 512 tokens for the encoder and 64 for the decoder. ## Training The model has been trained on 1x NVIDIA Tesla A100 40GB for 40 hours, 1x NVIDIA Tesla V100 32GB for 20 hours, and 4x NVIDIA Tesla A100 40GB for 20 hours. During training, the model has seen 7936K documents corresponding to roughly 5 epochs. # Use Assuming that you are using the provided Summarizer.ipynb file. ```python def summ_config(): cfg = OrderedDict([ # summarization model - checkpoint from website ("model_name", "krotima1/mbart-at2h-cs"), ("inference_cfg", OrderedDict([ ("num_beams", 4), ("top_k", 40), ("top_p", 0.92), ("do_sample", True), ("temperature", 0.89), ("repetition_penalty", 1.2), ("no_repeat_ngram_size", None), ("early_stopping", True), ("max_length", 64), ("min_length", 10), ])), #texts to summarize ("text", [ "Input your Czech text", ] ), ]) return cfg cfg = summ_config() #load model model = AutoModelForSeq2SeqLM.from_pretrained(cfg["model_name"]) tokenizer = AutoTokenizer.from_pretrained(cfg["model_name"]) # init summarizer summarize = Summarizer(model, tokenizer, cfg["inference_cfg"]) summarize(cfg["text"]) ```
Antony/mint_model
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - cs - cs tags: - Summarization - abstractive summarization - mbart-cc25 - Czech license: apache-2.0 datasets: - private Czech News Center dataset news-based - SumeCzech dataset news-based metrics: - rouge - rougeraw --- # mBART fine-tuned model for Czech abstractive summarization (HT2A-CS) This model is a fine-tuned checkpoint of [facebook/mbart-large-cc25](https://huggingface.co/facebook/mbart-large-cc25) on the Czech news dataset to produce Czech abstractive summaries. ## Task The model deals with the task ``Headline + Text to Abstract`` (HT2A) which consists in generating a multi-sentence summary considered as an abstract from a Czech news text. ## Dataset The model has been trained on a large Czech news dataset developed by a concatenation of two datasets, the private CNC dataset provided by Czech News Center and [SumeCzech](https://ufal.mff.cuni.cz/sumeczech) dataset. The dataset includes around 1.75M Czech news-based documents consisting of a Headline, Abstract, and Full-text sections. Truncation and padding were set to 512 tokens for the encoder and 128 for the decoder. ## Training The model has been trained on 1x NVIDIA Tesla A100 40GB for 60 hours and 4x NVIDIA Tesla A100 40GB for 40 hours. During training, the model has seen 12896K documents corresponding to roughly 8.4 epochs. # Use Assuming that you are using the provided Summarizer.ipynb file. ```python def summ_config(): cfg = OrderedDict([ # summarization model - checkpoint from website ("model_name", "krotima1/mbart-ht2a-cs"), ("inference_cfg", OrderedDict([ ("num_beams", 4), ("top_k", 40), ("top_p", 0.92), ("do_sample", True), ("temperature", 0.89), ("repetition_penalty", 1.2), ("no_repeat_ngram_size", None), ("early_stopping", True), ("max_length", 128), ("min_length", 10), ])), #texts to summarize ("text", [ "Input your Czech text", ] ), ]) return cfg cfg = summ_config() #load model model = AutoModelForSeq2SeqLM.from_pretrained(cfg["model_name"]) tokenizer = AutoTokenizer.from_pretrained(cfg["model_name"]) # init summarizer summarize = Summarizer(model, tokenizer, cfg["inference_cfg"]) summarize(cfg["text"]) ```
Anubhav23/model_name
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1** . ## Usage ```python model = load_from_hub(repo_id="gitierrez/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]) ```
Apisate/Discord-Ai-Bot
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 205.73 +/- 73.16 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
Appolo/TestModel
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-05-23T00:50:06Z
--- tags: - espnet - audio - automatic-speech-recognition language: be datasets: - commonvoice license: cc-by-4.0 --- ## ESPnet2 ASR model ### `espnet/belarusian_commonvoice_blstm` This model was trained by dzeinali using commonvoice recipe in [espnet](https://github.com/espnet/espnet/). ### Demo: How to use in ESPnet2 ```bash cd espnet git checkout 716eb8f92e19708acfd08ba3bd39d40890d3a84b pip install -e . cd egs2/commonvoice/asr1 ./run.sh --skip_data_prep false --skip_train true --download_model espnet/belarusian_commonvoice_blstm ``` <!-- Generated by scripts/utils/show_asr_result.sh --> # RESULTS ## Environments - date: `Thu May 19 18:39:24 EDT 2022` - python version: `3.9.5 (default, Jun 4 2021, 12:28:51) [GCC 7.5.0]` - espnet version: `espnet 0.10.6a1` - pytorch version: `pytorch 1.8.1+cu102` - Git hash: `716eb8f92e19708acfd08ba3bd39d40890d3a84b` - Commit date: `Thu Apr 28 19:50:59 2022 -0400` ## asr_train_asr_rnn_raw_be_bpe150_sp ### WER |dataset|Snt|Wrd|Corr|Sub|Del|Ins|Err|S.Err| |---|---|---|---|---|---|---|---|---| |decode_rnn_asr_model_valid.acc.best/test_be|15801|128674|84.2|14.4|1.5|1.5|17.3|62.1| ### CER |dataset|Snt|Wrd|Corr|Sub|Del|Ins|Err|S.Err| |---|---|---|---|---|---|---|---|---| |decode_rnn_asr_model_valid.acc.best/test_be|15801|906360|97.1|1.8|1.2|0.7|3.6|62.1| ### TER |dataset|Snt|Wrd|Corr|Sub|Del|Ins|Err|S.Err| |---|---|---|---|---|---|---|---|---| |decode_rnn_asr_model_valid.acc.best/test_be|15801|639798|95.5|2.9|1.6|0.9|5.3|62.1| ## ASR config <details><summary>expand</summary> ``` config: conf/tuning/train_asr_rnn.yaml print_config: false log_level: INFO dry_run: false iterator_type: sequence output_dir: exp/asr_train_asr_rnn_raw_be_bpe150_sp ngpu: 1 seed: 0 num_workers: 1 num_att_plot: 3 dist_backend: nccl dist_init_method: env:// dist_world_size: null dist_rank: null local_rank: 0 dist_master_addr: null dist_master_port: null dist_launcher: null multiprocessing_distributed: false unused_parameters: false sharded_ddp: false cudnn_enabled: true cudnn_benchmark: false cudnn_deterministic: true collect_stats: false write_collected_feats: false max_epoch: 15 patience: 3 val_scheduler_criterion: - valid - loss early_stopping_criterion: - valid - loss - min best_model_criterion: - - train - loss - min - - valid - loss - min - - train - acc - max - - valid - acc - max keep_nbest_models: - 10 nbest_averaging_interval: 0 grad_clip: 5.0 grad_clip_type: 2.0 grad_noise: false accum_grad: 1 no_forward_run: false resume: true train_dtype: float32 use_amp: false log_interval: null use_matplotlib: true use_tensorboard: true use_wandb: false wandb_project: null wandb_id: null wandb_entity: null wandb_name: null wandb_model_log_interval: -1 detect_anomaly: false pretrain_path: null init_param: [] ignore_init_mismatch: false freeze_param: [] num_iters_per_epoch: null batch_size: 30 valid_batch_size: null batch_bins: 1000000 valid_batch_bins: null train_shape_file: - exp/asr_stats_raw_be_bpe150_sp/train/speech_shape - exp/asr_stats_raw_be_bpe150_sp/train/text_shape.bpe valid_shape_file: - exp/asr_stats_raw_be_bpe150_sp/valid/speech_shape - exp/asr_stats_raw_be_bpe150_sp/valid/text_shape.bpe batch_type: folded valid_batch_type: null fold_length: - 80000 - 150 sort_in_batch: descending sort_batch: descending multiple_iterator: false chunk_length: 500 chunk_shift_ratio: 0.5 num_cache_chunks: 1024 train_data_path_and_name_and_type: - - dump/raw/train_be_sp/wav.scp - speech - sound - - dump/raw/train_be_sp/text - text - text valid_data_path_and_name_and_type: - - dump/raw/dev_be/wav.scp 
- speech - sound - - dump/raw/dev_be/text - text - text allow_variable_data_keys: false max_cache_size: 0.0 max_cache_fd: 32 valid_max_cache_size: null optim: adadelta optim_conf: lr: 0.1 scheduler: null scheduler_conf: {} token_list: - <blank> - <unk> - ▁ - я - с - а - . - н - о - м - е - у - ',' - на - ра - ў - д - р - т - і - ва - х - к - ка - й - б - з - ць - п - ц - лі - ны - ▁па - ма - г - ▁с - ла - ▁і - ю - ш - в - та - ы - ры - э - га - кі - ▁з - ні - ▁на - л - ча - да - ё - ле - ▁не - ль - ч - ▁ў - ж - не - ▁а - чы - ты - цца - ля - ▁за - ▁пра - мі - ве - бы - рэ - ▁А - ста - ку - го - цы - пе - ст - ▁вы - па - ▁ад - ▁я - ці - ба - дзе - ▁да - вы - оў - ту - ко - ды - ▁гэта - ло - ▁што - ▁ка - ф - жа - П - ві - дзі - Я - Т - '"' - ь - '?' - І - М - З - Д - В - Н - С - '!' - К - '-' - '''' - Б - Ц - Г - Р - Ш - Х - Л - А - Ж - Ф - У - Э - О - Е - I - — - Ю - ‒ - ':' - Ў - – - » - Ы - ; - ’ - Ь - Й - ґ - « - Ч - N - Ё - <sos/eos> init: null input_size: null ctc_conf: dropout_rate: 0.0 ctc_type: builtin reduce: true ignore_nan_grad: true joint_net_conf: null model_conf: ctc_weight: 0.5 use_preprocessor: true token_type: bpe bpemodel: data/be_token_list/bpe_unigram150/bpe.model non_linguistic_symbols: null cleaner: null g2p: null speech_volume_normalize: null rir_scp: null rir_apply_prob: 1.0 noise_scp: null noise_apply_prob: 1.0 noise_db_range: '13_15' frontend: default frontend_conf: fs: 16k specaug: specaug specaug_conf: apply_time_warp: true time_warp_window: 5 time_warp_mode: bicubic apply_freq_mask: true freq_mask_width_range: - 0 - 27 num_freq_mask: 2 apply_time_mask: true time_mask_width_ratio_range: - 0.0 - 0.05 num_time_mask: 2 normalize: global_mvn normalize_conf: stats_file: exp/asr_stats_raw_be_bpe150_sp/train/feats_stats.npz preencoder: null preencoder_conf: {} encoder: vgg_rnn encoder_conf: rnn_type: lstm bidirectional: true use_projection: true num_layers: 4 hidden_size: 1024 output_size: 1024 postencoder: null postencoder_conf: {} decoder: rnn decoder_conf: num_layers: 2 hidden_size: 1024 sampling_probability: 0 att_conf: atype: location adim: 1024 aconv_chans: 10 aconv_filts: 100 required: - output_dir - token_list version: 0.10.6a1 distributed: false ``` </details> ### Citing ESPnet ```BibTex @inproceedings{watanabe2018espnet, author={Shinji Watanabe and Takaaki Hori and Shigeki Karita and Tomoki Hayashi and Jiro Nishitoba and Yuya Unno and Nelson Yalta and Jahn Heymann and Matthew Wiesner and Nanxin Chen and Adithya Renduchintala and Tsubasa Ochiai}, title={{ESPnet}: End-to-End Speech Processing Toolkit}, year={2018}, booktitle={Proceedings of Interspeech}, pages={2207--2211}, doi={10.21437/Interspeech.2018-1456}, url={http://dx.doi.org/10.21437/Interspeech.2018-1456} } ``` or arXiv: ```bibtex @misc{watanabe2018espnet, title={ESPnet: End-to-End Speech Processing Toolkit}, author={Shinji Watanabe and Takaaki Hori and Shigeki Karita and Tomoki Hayashi and Jiro Nishitoba and Yuya Unno and Nelson Yalta and Jahn Heymann and Matthew Wiesner and Nanxin Chen and Adithya Renduchintala and Tsubasa Ochiai}, year={2018}, eprint={1804.00015}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
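As an alternative to the shell recipe above, inference can be run from Python with ESPnet's `Speech2Text` interface. This is a sketch under the assumption that `espnet` and `espnet_model_zoo` are installed; it has not been verified against this particular checkpoint:

```python
import soundfile
from espnet2.bin.asr_inference import Speech2Text

# Pulls the packed model from the Hub via espnet_model_zoo.
speech2text = Speech2Text.from_pretrained("espnet/belarusian_commonvoice_blstm")

speech, rate = soundfile.read("sample.wav")  # 16 kHz mono audio
nbests = speech2text(speech)
text, tokens, token_ids, hyp = nbests[0]
print(text)
```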
ArBert/albert-base-v2-finetuned-ner-agglo-twitter
[ "pytorch", "tensorboard", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - common_voice model-index: - name: wav2vec2-large-xls-r-300m-turkish-colab results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-large-xls-r-300m-turkish-colab This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the common_voice dataset. It achieves the following results on the evaluation set: - Loss: 0.3701 - Wer: 0.2946 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 32 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 3.8287 | 3.67 | 400 | 0.6628 | 0.6928 | | 0.3926 | 7.34 | 800 | 0.4257 | 0.4716 | | 0.1847 | 11.01 | 1200 | 0.4034 | 0.3931 | | 0.1273 | 14.68 | 1600 | 0.4094 | 0.3664 | | 0.0991 | 18.35 | 2000 | 0.4133 | 0.3375 | | 0.0811 | 22.02 | 2400 | 0.4021 | 0.3301 | | 0.0646 | 25.69 | 2800 | 0.3949 | 0.3166 | | 0.0513 | 29.36 | 3200 | 0.3701 | 0.2946 | ### Framework versions - Transformers 4.11.3 - Pytorch 1.10.0+cu113 - Datasets 1.18.3 - Tokenizers 0.10.3
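A minimal transcription sketch for this checkpoint. The repository name is a placeholder — the card does not state where the model was pushed — and audio must be resampled to 16 kHz:

```python
import torch
import librosa
from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC

model_id = "<user>/wav2vec2-large-xls-r-300m-turkish-colab"  # placeholder repo name
processor = Wav2Vec2Processor.from_pretrained(model_id)
model = Wav2Vec2ForCTC.from_pretrained(model_id)

speech, _ = librosa.load("sample.wav", sr=16_000)
inputs = processor(speech, sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
    logits = model(inputs.input_values).logits
pred_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(pred_ids)[0])
```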
ArBert/albert-base-v2-finetuned-ner-agglo
[ "pytorch", "tensorboard", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - f1 - precision - recall model-index: - name: bert_sentence_classifier results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert_sentence_classifier This model is a fine-tuned version of [bert-large-cased](https://huggingface.co/bert-large-cased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 3.0040 - F1: 0.6123 - Precision: 0.6123 - Recall: 0.6123 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 8 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | Precision | Recall | |:-------------:|:-----:|:------:|:---------------:|:------:|:---------:|:------:| | 2.0049 | 0.04 | 500 | 1.5854 | 0.5693 | 0.5693 | 0.5693 | | 1.552 | 0.07 | 1000 | 1.4428 | 0.6131 | 0.6131 | 0.6131 | | 1.502 | 0.11 | 1500 | 1.3977 | 0.6213 | 0.6213 | 0.6213 | | 1.4515 | 0.14 | 2000 | 1.3926 | 0.6200 | 0.6200 | 0.6200 | | 1.43 | 0.18 | 2500 | 1.3553 | 0.6350 | 0.6350 | 0.6350 | | 1.413 | 0.21 | 3000 | 1.3461 | 0.6346 | 0.6346 | 0.6346 | | 1.4109 | 0.25 | 3500 | 1.3199 | 0.6496 | 0.6496 | 0.6496 | | 1.3853 | 0.28 | 4000 | 1.3338 | 0.6406 | 0.6406 | 0.6406 | | 1.3788 | 0.32 | 4500 | 1.3306 | 0.6471 | 0.6471 | 0.6471 | | 1.3585 | 0.35 | 5000 | 1.3295 | 0.6410 | 0.6410 | 0.6410 | | 1.356 | 0.39 | 5500 | 1.3025 | 0.6441 | 0.6441 | 0.6441 | | 1.3534 | 0.42 | 6000 | 1.3197 | 0.6406 | 0.6406 | 0.6406 | | 1.3324 | 0.46 | 6500 | 1.2932 | 0.6436 | 0.6436 | 0.6436 | | 1.3563 | 0.49 | 7000 | 1.3202 | 0.6488 | 0.6488 | 0.6488 | | 1.3121 | 0.53 | 7500 | 1.3024 | 0.6428 | 0.6428 | 0.6428 | | 1.3092 | 0.56 | 8000 | 1.3142 | 0.6419 | 0.6419 | 0.6419 | | 1.3769 | 0.6 | 8500 | 1.2974 | 0.6441 | 0.6441 | 0.6441 | | 1.3487 | 0.63 | 9000 | 1.2882 | 0.6556 | 0.6556 | 0.6556 | | 1.3475 | 0.67 | 9500 | 1.2928 | 0.6441 | 0.6441 | 0.6441 | | 1.3038 | 0.7 | 10000 | 1.2846 | 0.6488 | 0.6488 | 0.6488 | | 1.3371 | 0.74 | 10500 | 1.2894 | 0.6591 | 0.6591 | 0.6591 | | 1.3222 | 0.77 | 11000 | 1.2745 | 0.6535 | 0.6535 | 0.6535 | | 1.2983 | 0.81 | 11500 | 1.2832 | 0.6526 | 0.6526 | 0.6526 | | 1.3505 | 0.84 | 12000 | 1.2812 | 0.6531 | 0.6531 | 0.6531 | | 1.2752 | 0.88 | 12500 | 1.2629 | 0.6578 | 0.6578 | 0.6578 | | 1.3115 | 0.91 | 13000 | 1.2787 | 0.6453 | 0.6453 | 0.6453 | | 1.3353 | 0.95 | 13500 | 1.2707 | 0.6539 | 0.6539 | 0.6539 | | 1.2982 | 0.98 | 14000 | 1.2618 | 0.6569 | 0.6569 | 0.6569 | | 1.1885 | 1.02 | 14500 | 1.2999 | 0.6544 | 0.6544 | 0.6544 | | 1.1339 | 1.05 | 15000 | 1.3086 | 0.6458 | 0.6458 | 0.6458 | | 1.0661 | 1.09 | 15500 | 1.2871 | 0.6582 | 0.6582 | 0.6582 | | 1.109 | 1.12 | 16000 | 1.2800 | 0.6608 | 0.6608 | 0.6608 | | 1.0305 | 1.16 | 16500 | 1.3098 | 0.6604 | 0.6604 | 0.6604 | | 1.0855 | 1.19 | 17000 | 1.2968 | 0.6587 | 0.6587 | 0.6587 | | 1.0933 | 1.23 | 17500 | 1.3075 | 0.6509 | 0.6509 | 0.6509 | | 1.1229 | 1.26 | 18000 | 1.3018 | 0.6496 | 0.6496 | 0.6496 | | 1.1043 | 1.3 | 18500 | 1.2832 | 0.6565 | 0.6565 | 0.6565 | | 1.1344 | 1.33 | 19000 | 1.2825 | 0.6591 | 0.6591 | 
0.6591 | | 1.1467 | 1.37 | 19500 | 1.2797 | 0.6642 | 0.6642 | 0.6642 | | 1.0596 | 1.4 | 20000 | 1.2841 | 0.6522 | 0.6522 | 0.6522 | | 1.1286 | 1.44 | 20500 | 1.2912 | 0.6544 | 0.6544 | 0.6544 | | 1.1219 | 1.47 | 21000 | 1.3143 | 0.6509 | 0.6509 | 0.6509 | | 1.1339 | 1.51 | 21500 | 1.3021 | 0.6539 | 0.6539 | 0.6539 | | 1.1091 | 1.54 | 22000 | 1.2738 | 0.6625 | 0.6625 | 0.6625 | | 1.1403 | 1.58 | 22500 | 1.2822 | 0.6548 | 0.6548 | 0.6548 | | 1.146 | 1.61 | 23000 | 1.2724 | 0.6587 | 0.6587 | 0.6587 | | 1.1237 | 1.65 | 23500 | 1.2757 | 0.6569 | 0.6569 | 0.6569 | | 1.1453 | 1.68 | 24000 | 1.2985 | 0.6535 | 0.6535 | 0.6535 | | 1.1309 | 1.72 | 24500 | 1.2876 | 0.6578 | 0.6578 | 0.6578 | | 1.1494 | 1.75 | 25000 | 1.2892 | 0.6552 | 0.6552 | 0.6552 | | 1.1571 | 1.79 | 25500 | 1.2806 | 0.6548 | 0.6548 | 0.6548 | | 1.0766 | 1.82 | 26000 | 1.2889 | 0.6509 | 0.6509 | 0.6509 | | 1.1416 | 1.86 | 26500 | 1.2673 | 0.6599 | 0.6599 | 0.6599 | | 1.1179 | 1.89 | 27000 | 1.2919 | 0.6501 | 0.6501 | 0.6501 | | 1.0838 | 1.93 | 27500 | 1.3198 | 0.6488 | 0.6488 | 0.6488 | | 1.1426 | 1.96 | 28000 | 1.2766 | 0.6561 | 0.6561 | 0.6561 | | 1.1559 | 2.0 | 28500 | 1.2839 | 0.6561 | 0.6561 | 0.6561 | | 0.8783 | 2.03 | 29000 | 1.3377 | 0.6509 | 0.6509 | 0.6509 | | 0.8822 | 2.07 | 29500 | 1.3813 | 0.6501 | 0.6501 | 0.6501 | | 0.8823 | 2.1 | 30000 | 1.3738 | 0.6514 | 0.6514 | 0.6514 | | 0.9094 | 2.14 | 30500 | 1.3667 | 0.6522 | 0.6522 | 0.6522 | | 0.8828 | 2.17 | 31000 | 1.3654 | 0.6582 | 0.6582 | 0.6582 | | 0.8489 | 2.21 | 31500 | 1.3404 | 0.6556 | 0.6556 | 0.6556 | | 0.8719 | 2.24 | 32000 | 1.4173 | 0.6393 | 0.6393 | 0.6393 | | 0.8926 | 2.28 | 32500 | 1.4026 | 0.6535 | 0.6535 | 0.6535 | | 0.871 | 2.31 | 33000 | 1.4133 | 0.6428 | 0.6428 | 0.6428 | | 0.9047 | 2.35 | 33500 | 1.3915 | 0.6449 | 0.6449 | 0.6449 | | 0.8621 | 2.38 | 34000 | 1.4109 | 0.6483 | 0.6483 | 0.6483 | | 0.8978 | 2.42 | 34500 | 1.3675 | 0.6471 | 0.6471 | 0.6471 | | 0.8808 | 2.45 | 35000 | 1.3826 | 0.6522 | 0.6522 | 0.6522 | | 0.9299 | 2.49 | 35500 | 1.3673 | 0.6535 | 0.6535 | 0.6535 | | 0.8546 | 2.52 | 36000 | 1.4034 | 0.6518 | 0.6518 | 0.6518 | | 0.8855 | 2.56 | 36500 | 1.3763 | 0.6458 | 0.6458 | 0.6458 | | 0.8996 | 2.59 | 37000 | 1.3930 | 0.6539 | 0.6539 | 0.6539 | | 0.8889 | 2.63 | 37500 | 1.3966 | 0.6471 | 0.6471 | 0.6471 | | 0.8811 | 2.66 | 38000 | 1.4131 | 0.6475 | 0.6475 | 0.6475 | | 0.9129 | 2.7 | 38500 | 1.3816 | 0.6445 | 0.6445 | 0.6445 | | 0.8708 | 2.73 | 39000 | 1.4354 | 0.6492 | 0.6492 | 0.6492 | | 0.8667 | 2.77 | 39500 | 1.4076 | 0.6380 | 0.6380 | 0.6380 | | 0.9139 | 2.8 | 40000 | 1.4200 | 0.6423 | 0.6423 | 0.6423 | | 0.9035 | 2.84 | 40500 | 1.3913 | 0.6462 | 0.6462 | 0.6462 | | 0.9312 | 2.87 | 41000 | 1.3806 | 0.6449 | 0.6449 | 0.6449 | | 0.9382 | 2.91 | 41500 | 1.4064 | 0.6522 | 0.6522 | 0.6522 | | 0.8765 | 2.95 | 42000 | 1.4146 | 0.6380 | 0.6380 | 0.6380 | | 0.8801 | 2.98 | 42500 | 1.3898 | 0.6445 | 0.6445 | 0.6445 | | 0.7988 | 3.02 | 43000 | 1.4740 | 0.6436 | 0.6436 | 0.6436 | | 0.6752 | 3.05 | 43500 | 1.5622 | 0.6372 | 0.6372 | 0.6372 | | 0.649 | 3.09 | 44000 | 1.6055 | 0.6359 | 0.6359 | 0.6359 | | 0.669 | 3.12 | 44500 | 1.5736 | 0.6380 | 0.6380 | 0.6380 | | 0.7189 | 3.16 | 45000 | 1.5832 | 0.6346 | 0.6346 | 0.6346 | | 0.6724 | 3.19 | 45500 | 1.6194 | 0.6260 | 0.6260 | 0.6260 | | 0.7139 | 3.23 | 46000 | 1.5966 | 0.6359 | 0.6359 | 0.6359 | | 0.6985 | 3.26 | 46500 | 1.5803 | 0.6342 | 0.6342 | 0.6342 | | 0.6503 | 3.3 | 47000 | 1.6485 | 0.6376 | 0.6376 | 0.6376 | | 0.6879 | 3.33 | 47500 | 1.5959 | 0.6325 | 0.6325 | 0.6325 | | 0.7342 | 3.37 | 
48000 | 1.5534 | 0.6389 | 0.6389 | 0.6389 | | 0.6838 | 3.4 | 48500 | 1.5807 | 0.6337 | 0.6337 | 0.6337 | | 0.7295 | 3.44 | 49000 | 1.6192 | 0.6372 | 0.6372 | 0.6372 | | 0.7044 | 3.47 | 49500 | 1.6618 | 0.6346 | 0.6346 | 0.6346 | | 0.7071 | 3.51 | 50000 | 1.6255 | 0.6342 | 0.6342 | 0.6342 | | 0.7055 | 3.54 | 50500 | 1.5584 | 0.6363 | 0.6363 | 0.6363 | | 0.6781 | 3.58 | 51000 | 1.5948 | 0.6376 | 0.6376 | 0.6376 | | 0.7004 | 3.61 | 51500 | 1.6311 | 0.6320 | 0.6320 | 0.6320 | | 0.715 | 3.65 | 52000 | 1.5972 | 0.6423 | 0.6423 | 0.6423 | | 0.7399 | 3.68 | 52500 | 1.6402 | 0.6325 | 0.6325 | 0.6325 | | 0.6972 | 3.72 | 53000 | 1.6186 | 0.6406 | 0.6406 | 0.6406 | | 0.7219 | 3.75 | 53500 | 1.5945 | 0.6359 | 0.6359 | 0.6359 | | 0.763 | 3.79 | 54000 | 1.5900 | 0.6380 | 0.6380 | 0.6380 | | 0.7196 | 3.82 | 54500 | 1.6218 | 0.6320 | 0.6320 | 0.6320 | | 0.7682 | 3.86 | 55000 | 1.5538 | 0.6372 | 0.6372 | 0.6372 | | 0.6949 | 3.89 | 55500 | 1.6209 | 0.6295 | 0.6295 | 0.6295 | | 0.7461 | 3.93 | 56000 | 1.6237 | 0.6316 | 0.6316 | 0.6316 | | 0.7295 | 3.96 | 56500 | 1.6011 | 0.6333 | 0.6333 | 0.6333 | | 0.6846 | 4.0 | 57000 | 1.6899 | 0.6312 | 0.6312 | 0.6312 | | 0.556 | 4.03 | 57500 | 1.7783 | 0.6303 | 0.6303 | 0.6303 | | 0.5276 | 4.07 | 58000 | 1.8985 | 0.6260 | 0.6260 | 0.6260 | | 0.5576 | 4.1 | 58500 | 1.8263 | 0.6264 | 0.6264 | 0.6264 | | 0.5303 | 4.14 | 59000 | 1.8411 | 0.6316 | 0.6316 | 0.6316 | | 0.5574 | 4.17 | 59500 | 1.8353 | 0.6286 | 0.6286 | 0.6286 | | 0.5468 | 4.21 | 60000 | 1.9252 | 0.6286 | 0.6286 | 0.6286 | | 0.532 | 4.24 | 60500 | 1.8903 | 0.6295 | 0.6295 | 0.6295 | | 0.5329 | 4.28 | 61000 | 1.9416 | 0.6252 | 0.6252 | 0.6252 | | 0.5539 | 4.31 | 61500 | 1.9149 | 0.6260 | 0.6260 | 0.6260 | | 0.5661 | 4.35 | 62000 | 1.9074 | 0.6286 | 0.6286 | 0.6286 | | 0.5502 | 4.38 | 62500 | 2.0259 | 0.6316 | 0.6316 | 0.6316 | | 0.5658 | 4.42 | 63000 | 1.9049 | 0.6256 | 0.6256 | 0.6256 | | 0.5958 | 4.45 | 63500 | 1.9252 | 0.6166 | 0.6166 | 0.6166 | | 0.5972 | 4.49 | 64000 | 1.8518 | 0.6286 | 0.6286 | 0.6286 | | 0.5964 | 4.52 | 64500 | 1.8793 | 0.6234 | 0.6234 | 0.6234 | | 0.5506 | 4.56 | 65000 | 1.9218 | 0.6346 | 0.6346 | 0.6346 | | 0.5516 | 4.59 | 65500 | 1.8957 | 0.6389 | 0.6389 | 0.6389 | | 0.5777 | 4.63 | 66000 | 1.9603 | 0.6295 | 0.6295 | 0.6295 | | 0.5953 | 4.66 | 66500 | 1.8605 | 0.6252 | 0.6252 | 0.6252 | | 0.5797 | 4.7 | 67000 | 1.8797 | 0.6320 | 0.6320 | 0.6320 | | 0.5836 | 4.73 | 67500 | 1.9320 | 0.6260 | 0.6260 | 0.6260 | | 0.6019 | 4.77 | 68000 | 1.8465 | 0.6239 | 0.6239 | 0.6239 | | 0.6099 | 4.8 | 68500 | 1.9481 | 0.6299 | 0.6299 | 0.6299 | | 0.6064 | 4.84 | 69000 | 1.9033 | 0.6307 | 0.6307 | 0.6307 | | 0.5836 | 4.87 | 69500 | 1.8878 | 0.6234 | 0.6234 | 0.6234 | | 0.5766 | 4.91 | 70000 | 1.8860 | 0.6277 | 0.6277 | 0.6277 | | 0.623 | 4.94 | 70500 | 1.8033 | 0.6303 | 0.6303 | 0.6303 | | 0.596 | 4.98 | 71000 | 1.9038 | 0.6333 | 0.6333 | 0.6333 | | 0.537 | 5.01 | 71500 | 2.0795 | 0.6234 | 0.6234 | 0.6234 | | 0.4663 | 5.05 | 72000 | 2.0325 | 0.6217 | 0.6217 | 0.6217 | | 0.4173 | 5.08 | 72500 | 2.2377 | 0.6273 | 0.6273 | 0.6273 | | 0.4521 | 5.12 | 73000 | 2.1218 | 0.6217 | 0.6217 | 0.6217 | | 0.4243 | 5.15 | 73500 | 2.2731 | 0.6204 | 0.6204 | 0.6204 | | 0.4672 | 5.19 | 74000 | 2.2111 | 0.6247 | 0.6247 | 0.6247 | | 0.4884 | 5.22 | 74500 | 2.1027 | 0.6226 | 0.6226 | 0.6226 | | 0.4314 | 5.26 | 75000 | 2.2218 | 0.6230 | 0.6230 | 0.6230 | | 0.4581 | 5.29 | 75500 | 2.2036 | 0.6264 | 0.6264 | 0.6264 | | 0.4245 | 5.33 | 76000 | 2.2419 | 0.6200 | 0.6200 | 0.6200 | | 0.4391 | 5.36 | 76500 | 2.1762 | 0.6187 | 0.6187 
| 0.6187 | | 0.4672 | 5.4 | 77000 | 2.2779 | 0.6179 | 0.6179 | 0.6179 | | 0.4821 | 5.43 | 77500 | 2.2881 | 0.6187 | 0.6187 | 0.6187 | | 0.4872 | 5.47 | 78000 | 2.2406 | 0.6119 | 0.6119 | 0.6119 | | 0.4584 | 5.5 | 78500 | 2.3521 | 0.6209 | 0.6209 | 0.6209 | | 0.4774 | 5.54 | 79000 | 2.2522 | 0.6174 | 0.6174 | 0.6174 | | 0.5151 | 5.57 | 79500 | 2.2233 | 0.6140 | 0.6140 | 0.6140 | | 0.493 | 5.61 | 80000 | 2.2333 | 0.6256 | 0.6256 | 0.6256 | | 0.4846 | 5.64 | 80500 | 2.1891 | 0.6200 | 0.6200 | 0.6200 | | 0.478 | 5.68 | 81000 | 2.3159 | 0.6196 | 0.6196 | 0.6196 | | 0.4851 | 5.71 | 81500 | 2.2356 | 0.6234 | 0.6234 | 0.6234 | | 0.4902 | 5.75 | 82000 | 2.3525 | 0.6222 | 0.6222 | 0.6222 | | 0.4992 | 5.79 | 82500 | 2.2111 | 0.6067 | 0.6067 | 0.6067 | | 0.4799 | 5.82 | 83000 | 2.2650 | 0.6131 | 0.6131 | 0.6131 | | 0.4849 | 5.86 | 83500 | 2.2628 | 0.6204 | 0.6204 | 0.6204 | | 0.4772 | 5.89 | 84000 | 2.2711 | 0.6174 | 0.6174 | 0.6174 | | 0.5465 | 5.93 | 84500 | 2.2793 | 0.6144 | 0.6144 | 0.6144 | | 0.4466 | 5.96 | 85000 | 2.2369 | 0.6166 | 0.6166 | 0.6166 | | 0.4885 | 6.0 | 85500 | 2.1963 | 0.6217 | 0.6217 | 0.6217 | | 0.3862 | 6.03 | 86000 | 2.4233 | 0.6174 | 0.6174 | 0.6174 | | 0.3738 | 6.07 | 86500 | 2.4405 | 0.6191 | 0.6191 | 0.6191 | | 0.349 | 6.1 | 87000 | 2.4512 | 0.6161 | 0.6161 | 0.6161 | | 0.3659 | 6.14 | 87500 | 2.5251 | 0.6226 | 0.6226 | 0.6226 | | 0.3365 | 6.17 | 88000 | 2.5326 | 0.6217 | 0.6217 | 0.6217 | | 0.3336 | 6.21 | 88500 | 2.4413 | 0.6179 | 0.6179 | 0.6179 | | 0.3632 | 6.24 | 89000 | 2.6415 | 0.6114 | 0.6114 | 0.6114 | | 0.3584 | 6.28 | 89500 | 2.5388 | 0.6179 | 0.6179 | 0.6179 | | 0.3891 | 6.31 | 90000 | 2.6418 | 0.6123 | 0.6123 | 0.6123 | | 0.3805 | 6.35 | 90500 | 2.6223 | 0.6127 | 0.6127 | 0.6127 | | 0.363 | 6.38 | 91000 | 2.5399 | 0.6131 | 0.6131 | 0.6131 | | 0.3723 | 6.42 | 91500 | 2.6033 | 0.6187 | 0.6187 | 0.6187 | | 0.3808 | 6.45 | 92000 | 2.5281 | 0.6243 | 0.6243 | 0.6243 | | 0.3921 | 6.49 | 92500 | 2.5814 | 0.6007 | 0.6007 | 0.6007 | | 0.3763 | 6.52 | 93000 | 2.6656 | 0.6058 | 0.6058 | 0.6058 | | 0.3921 | 6.56 | 93500 | 2.4935 | 0.6084 | 0.6084 | 0.6084 | | 0.3737 | 6.59 | 94000 | 2.7270 | 0.6166 | 0.6166 | 0.6166 | | 0.3766 | 6.63 | 94500 | 2.5289 | 0.6217 | 0.6217 | 0.6217 | | 0.4439 | 6.66 | 95000 | 2.6161 | 0.6222 | 0.6222 | 0.6222 | | 0.4166 | 6.7 | 95500 | 2.5298 | 0.6123 | 0.6123 | 0.6123 | | 0.4064 | 6.73 | 96000 | 2.5952 | 0.6183 | 0.6183 | 0.6183 | | 0.4253 | 6.77 | 96500 | 2.4567 | 0.6127 | 0.6127 | 0.6127 | | 0.3754 | 6.8 | 97000 | 2.5473 | 0.6131 | 0.6131 | 0.6131 | | 0.3993 | 6.84 | 97500 | 2.5563 | 0.6161 | 0.6161 | 0.6161 | | 0.3802 | 6.87 | 98000 | 2.6585 | 0.6076 | 0.6076 | 0.6076 | | 0.4504 | 6.91 | 98500 | 2.5700 | 0.6127 | 0.6127 | 0.6127 | | 0.3832 | 6.94 | 99000 | 2.5983 | 0.6174 | 0.6174 | 0.6174 | | 0.4212 | 6.98 | 99500 | 2.6137 | 0.6110 | 0.6110 | 0.6110 | | 0.3253 | 7.01 | 100000 | 2.8467 | 0.6024 | 0.6024 | 0.6024 | | 0.2553 | 7.05 | 100500 | 2.7412 | 0.6063 | 0.6063 | 0.6063 | | 0.2771 | 7.08 | 101000 | 2.8670 | 0.6101 | 0.6101 | 0.6101 | | 0.2733 | 7.12 | 101500 | 2.8536 | 0.6166 | 0.6166 | 0.6166 | | 0.2972 | 7.15 | 102000 | 2.8254 | 0.6161 | 0.6161 | 0.6161 | | 0.2893 | 7.19 | 102500 | 3.0228 | 0.6058 | 0.6058 | 0.6058 | | 0.3104 | 7.22 | 103000 | 2.8617 | 0.6011 | 0.6011 | 0.6011 | | 0.3019 | 7.26 | 103500 | 3.0106 | 0.6131 | 0.6131 | 0.6131 | | 0.3143 | 7.29 | 104000 | 3.0189 | 0.6088 | 0.6088 | 0.6088 | | 0.3054 | 7.33 | 104500 | 3.0291 | 0.6063 | 0.6063 | 0.6063 | | 0.3145 | 7.36 | 105000 | 3.0166 | 0.6106 | 0.6106 | 0.6106 | | 0.2913 
| 7.4 | 105500 | 3.0480 | 0.6174 | 0.6174 | 0.6174 | | 0.3159 | 7.43 | 106000 | 2.9714 | 0.6084 | 0.6084 | 0.6084 | | 0.3216 | 7.47 | 106500 | 2.9359 | 0.6187 | 0.6187 | 0.6187 | | 0.2982 | 7.5 | 107000 | 3.0509 | 0.6084 | 0.6084 | 0.6084 | | 0.2952 | 7.54 | 107500 | 2.9428 | 0.6076 | 0.6076 | 0.6076 | | 0.304 | 7.57 | 108000 | 3.0155 | 0.6071 | 0.6071 | 0.6071 | | 0.2896 | 7.61 | 108500 | 3.0276 | 0.6196 | 0.6196 | 0.6196 | | 0.3226 | 7.64 | 109000 | 2.9331 | 0.6097 | 0.6097 | 0.6097 | | 0.299 | 7.68 | 109500 | 2.9671 | 0.6050 | 0.6050 | 0.6050 | | 0.3079 | 7.71 | 110000 | 2.9394 | 0.6093 | 0.6093 | 0.6093 | | 0.3064 | 7.75 | 110500 | 2.8690 | 0.6110 | 0.6110 | 0.6110 | | 0.3423 | 7.78 | 111000 | 2.9095 | 0.6183 | 0.6183 | 0.6183 | | 0.3085 | 7.82 | 111500 | 2.9967 | 0.6260 | 0.6260 | 0.6260 | | 0.3071 | 7.85 | 112000 | 2.9429 | 0.6127 | 0.6127 | 0.6127 | | 0.3197 | 7.89 | 112500 | 3.0123 | 0.6157 | 0.6157 | 0.6157 | | 0.3361 | 7.92 | 113000 | 2.9832 | 0.6170 | 0.6170 | 0.6170 | | 0.3252 | 7.96 | 113500 | 3.0174 | 0.6071 | 0.6071 | 0.6071 | | 0.2802 | 7.99 | 114000 | 3.0040 | 0.6123 | 0.6123 | 0.6123 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
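A minimal inference sketch for this classifier. The repository name is a placeholder, and the label set is not documented in the card, so treat the returned labels accordingly:

```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline

model_id = "<user>/bert_sentence_classifier"  # placeholder repo name
classifier = pipeline(
    "text-classification",
    model=AutoModelForSequenceClassification.from_pretrained(model_id),
    tokenizer=AutoTokenizer.from_pretrained(model_id),
)
print(classifier("An example sentence to classify."))
```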
ArBert/albert-base-v2-finetuned-ner-gmm-twitter
[ "pytorch", "tensorboard", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- language: ko tags: - korean --- https://github.com/BM-K/Sentence-Embedding-is-all-you-need # Korean-Sentence-Embedding 🍭 Korean sentence embedding repository. You can download the pre-trained models and run inference right away, and the repository also provides an environment where you can train your own models. ## Quick tour ```python import torch from transformers import AutoModel, AutoTokenizer def cal_score(a, b): if len(a.shape) == 1: a = a.unsqueeze(0) if len(b.shape) == 1: b = b.unsqueeze(0) a_norm = a / a.norm(dim=1)[:, None] b_norm = b / b.norm(dim=1)[:, None] return torch.mm(a_norm, b_norm.transpose(0, 1)) * 100 model = AutoModel.from_pretrained('BM-K/KoSimCSE-bert') tokenizer = AutoTokenizer.from_pretrained('BM-K/KoSimCSE-bert') sentences = ['치타가 들판을 가로 질러 먹이를 쫓는다.', '치타 한 마리가 먹이 뒤에서 달리고 있다.', '원숭이 한 마리가 드럼을 연주한다.'] inputs = tokenizer(sentences, padding=True, truncation=True, return_tensors="pt") embeddings, _ = model(**inputs, return_dict=False) score01 = cal_score(embeddings[0][0], embeddings[1][0]) score02 = cal_score(embeddings[0][0], embeddings[2][0]) ``` ## Performance - Semantic Textual Similarity test set results <br> | Model | AVG | Cosine Pearson | Cosine Spearman | Euclidean Pearson | Euclidean Spearman | Manhattan Pearson | Manhattan Spearman | Dot Pearson | Dot Spearman | |------------------------|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:| | KoSBERT<sup>†</sup><sub>SKT</sub> | 77.40 | 78.81 | 78.47 | 77.68 | 77.78 | 77.71 | 77.83 | 75.75 | 75.22 | | KoSBERT | 80.39 | 82.13 | 82.25 | 80.67 | 80.75 | 80.69 | 80.78 | 77.96 | 77.90 | | KoSRoBERTa | 81.64 | 81.20 | 82.20 | 81.79 | 82.34 | 81.59 | 82.20 | 80.62 | 81.25 | | | | | | | | | | | | KoSentenceBART | 77.14 | 79.71 | 78.74 | 78.42 | 78.02 | 78.40 | 78.00 | 74.24 | 72.15 | | KoSentenceT5 | 77.83 | 80.87 | 79.74 | 80.24 | 79.36 | 80.19 | 79.27 | 72.81 | 70.17 | | | | | | | | | | | | KoSimCSE-BERT<sup>†</sup><sub>SKT</sub> | 81.32 | 82.12 | 82.56 | 81.84 | 81.63 | 81.99 | 81.74 | 79.55 | 79.19 | | KoSimCSE-BERT | 83.37 | 83.22 | 83.58 | 83.24 | 83.60 | 83.15 | 83.54 | 83.13 | 83.49 | | KoSimCSE-RoBERTa | 83.65 | 83.60 | 83.77 | 83.54 | 83.76 | 83.55 | 83.77 | 83.55 | 83.64 | | | | | | | | | | | | | KoSimCSE-BERT-multitask | 85.71 | 85.29 | 86.02 | 85.63 | 86.01 | 85.57 | 85.97 | 85.26 | 85.93 | | KoSimCSE-RoBERTa-multitask | 85.77 | 85.08 | 86.12 | 85.84 | 86.12 | 85.83 | 86.12 | 85.03 | 85.99 |
ArBert/albert-base-v2-finetuned-ner-kmeans-twitter
[ "pytorch", "tensorboard", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-Taxi-v3 results: - metrics: - type: mean_reward value: 7.56 +/- 2.71 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3** . ## Usage ```python model = load_from_hub(repo_id="gitierrez/q-Taxi-v3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]) ```
ArBert/bert-base-uncased-finetuned-ner-gmm
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - generated_from_trainer model-index: - name: Gusteau results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Gusteau This model is a fine-tuned version of [gpt2-medium](https://huggingface.co/gpt2-medium) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 1 - eval_batch_size: 2 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 4 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 0.16 ### Training results ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
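A minimal generation sketch for this GPT-2-medium fine-tune. The repository name is a placeholder and the sampling settings are illustrative:

```python
from transformers import pipeline

generator = pipeline("text-generation", model="<user>/Gusteau")  # placeholder repo name
output = generator("Anyone can cook,", max_new_tokens=50, do_sample=True, top_p=0.95)
print(output[0]["generated_text"])
```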
ArBert/bert-base-uncased-finetuned-ner-kmeans-twitter
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - espnet - audio - automatic-speech-recognition language: noinfo datasets: - bn_openslr53 license: cc-by-4.0 --- ## ESPnet2 ASR model ### `espnet/bengali_blstm` This model was trained by dzeinali using bn_openslr53 recipe in [espnet](https://github.com/espnet/espnet/). ### Demo: How to use in ESPnet2 ```bash cd espnet git checkout 716eb8f92e19708acfd08ba3bd39d40890d3a84b pip install -e . cd egs2/bn_openslr53/asr1 ./run.sh --skip_data_prep false --skip_train true --download_model espnet/bengali_blstm ``` <!-- Generated by scripts/utils/show_asr_result.sh --> # RESULTS ## Environments - date: `Sun May 22 21:21:37 EDT 2022` - python version: `3.9.5 (default, Jun 4 2021, 12:28:51) [GCC 7.5.0]` - espnet version: `espnet 0.10.6a1` - pytorch version: `pytorch 1.8.1+cu102` - Git hash: `716eb8f92e19708acfd08ba3bd39d40890d3a84b` - Commit date: `Thu Apr 28 19:50:59 2022 -0400` ## asr_bn_rnn ### WER |dataset|Snt|Wrd|Corr|Sub|Del|Ins|Err|S.Err| |---|---|---|---|---|---|---|---|---| |decode_rnn_batch_size1_asr_model_valid.acc.best/sbn_test|2018|6470|79.8|17.9|2.3|2.2|22.4|43.3| ### CER |dataset|Snt|Wrd|Corr|Sub|Del|Ins|Err|S.Err| |---|---|---|---|---|---|---|---|---| |decode_rnn_batch_size1_asr_model_valid.acc.best/sbn_test|2018|39196|93.9|2.8|3.3|1.2|7.3|43.3| ### TER |dataset|Snt|Wrd|Corr|Sub|Del|Ins|Err|S.Err| |---|---|---|---|---|---|---|---|---| |decode_rnn_batch_size1_asr_model_valid.acc.best/sbn_test|2018|15595|84.8|8.9|6.3|1.0|16.2|43.1| ## ASR config <details><summary>expand</summary> ``` config: conf/train_asr_rnn.yaml print_config: false log_level: INFO dry_run: false iterator_type: sequence output_dir: exp/asr_bn_rnn ngpu: 1 seed: 0 num_workers: 1 num_att_plot: 3 dist_backend: nccl dist_init_method: env:// dist_world_size: null dist_rank: null local_rank: 0 dist_master_addr: null dist_master_port: null dist_launcher: null multiprocessing_distributed: false unused_parameters: false sharded_ddp: false cudnn_enabled: true cudnn_benchmark: false cudnn_deterministic: true collect_stats: false write_collected_feats: false max_epoch: 50 patience: 3 val_scheduler_criterion: - valid - loss early_stopping_criterion: - valid - loss - min best_model_criterion: - - train - loss - min - - valid - loss - min - - train - acc - max - - valid - acc - max keep_nbest_models: - 10 nbest_averaging_interval: 0 grad_clip: 5.0 grad_clip_type: 2.0 grad_noise: false accum_grad: 1 no_forward_run: false resume: true train_dtype: float32 use_amp: false log_interval: null use_matplotlib: true use_tensorboard: true use_wandb: false wandb_project: null wandb_id: null wandb_entity: null wandb_name: null wandb_model_log_interval: -1 detect_anomaly: false pretrain_path: null init_param: [] ignore_init_mismatch: false freeze_param: [] num_iters_per_epoch: null batch_size: 30 valid_batch_size: null batch_bins: 1000000 valid_batch_bins: null train_shape_file: - exp/asr_stats_raw_bpe1000/train/speech_shape - exp/asr_stats_raw_bpe1000/train/text_shape.bpe valid_shape_file: - exp/asr_stats_raw_bpe1000/valid/speech_shape - exp/asr_stats_raw_bpe1000/valid/text_shape.bpe batch_type: folded valid_batch_type: null fold_length: - 80000 - 150 sort_in_batch: descending sort_batch: descending multiple_iterator: false chunk_length: 500 chunk_shift_ratio: 0.5 num_cache_chunks: 1024 train_data_path_and_name_and_type: - - dump/raw/sbn_train/wav.scp - speech - sound - - dump/raw/sbn_train/text - text - text valid_data_path_and_name_and_type: - - dump/raw/sbn_dev/wav.scp - speech - sound - - dump/raw/sbn_dev/text - text - text 
allow_variable_data_keys: false max_cache_size: 0.0 max_cache_fd: 32 valid_max_cache_size: null optim: adadelta optim_conf: lr: 0.1 scheduler: null scheduler_conf: {} token_list: - <blank> - <unk> - র - ে - ন - ের - া - ল - ক - ্ - ো - ত - ি - স - ▁ - ই - ী - য় - ম - ু - ▁আ - প - ব - তে - দ - শ - কে - টি - ্য - হ - ▁এ - ▁না - ▁ব - ও - গ - ট - রা - ▁অ - জ - ▁বি - ▁বা - ▁স - না - ার - ▁করে - ধ - নি - ▁ম - লে - ▁জ - ▁ও - ▁হ - চ - তা - দের - ▁মা - িত - ▁থেকে - ্যা - ণ - '-' - ▁প্র - তি - ▁হয় - ায় - িক - ▁এক - ▁পা - ▁ক - ঁ - ভ - ▁ভ - ▁সা - লা - ▁শ - ',' - ্র - ▁এই - ▁নি - ▁প - বা - ▁পর - ফ - ▁সে - ক্ষ - ছে - মা - ষ - ▁কা - টা - বে - িয়া - ড় - ▁দ - ▁চ - লি - ▁ই - ▁হা - ▁তার - ▁যে - থ - । - ড - ুল - িয়ে - ▁গ - বি - ▁তা - রি - কা - ▁র - ▁ফ - পা - ▁ন - ▁করা - ং - ▁আর - উ - নে - খ - য়ে - ▁নিয়ে - ▁তিনি - ▁একটি - নের - ▁হয়েছে - ্ব - ▁ত - ▁জন্য - ▁যা - বার - ঙ্গ - ান - স্ত - কার - জা - ূ - ঠ - ুর - ▁হবে - ▁মি - দা - াই - ▁জা - ▁বলে - ▁কি - ড়া - ▁ঘ - ▁দু - হা - ত্র - ০ - ছেন - ▁কথা - সি - াম - ▁ছিল - ▁উ - ▁বল - ▁তাদের - ৃ - ▁রা - ▁সঙ্গে - ▁প্রতি - ▁এবং - ▁ধ - ▁ল - ছ - ▁খা - ▁বে - ▁সময় - য়া - জন - মি - ন্ত - ▁করতে - ▁সু - ▁করেন - ীর - ৌ - ▁অনেক - গুলো - ষ্ট - ধা - সা - ▁হয়ে - ▁মধ্যে - ▁চা - ▁লা - ির - ▁১ - ▁সং - োর - ভাবে - ▁আমি - ১ - শা - াল - জি - ▁তারা - ▁যায় - মান - ▁কাজ - ▁কিছু - ▁দিয়ে - টে - রণ - ▁ড - ▁উপ - স্থ - দি - সে - ▁মে - ▁সরকার - ▁খ - ▁পার - ীয় - ক্ত - ওয়া - স্ট - এ - ▁বাংলাদেশ - ড়ে - ন্ট - ▁২ - ▁আছে - ▁সব - ছি - ▁দি - ▁আমার - ▁এখন - মে - ▁বছর - ▁ট - ▁শা - কি - ন্ড - ▁নাম - ▁কোন - দিন - পুর - ▁সম্ - ছিল - ▁পুলিশ - ▁য - ৈ - ▁মানুষ - ▁দা - েই - ▁এর - ▁সালে - ▁কর - ঘ - গ্র - ▁দিন - ▁পারে - ্ম - ৫ - ▁দেশ - ▁দেখ - ▁স্ব - ▁সম - ▁১৯ - ▁সি - ▁শুরু - ▁প্রথম - ত্ - ▁তো - ্ট - ▁আগে - ▁কোনো - ▁রয়েছে - ▁হচ্ছে - ▁অব - ছিলেন - যোগ - জে - ▁ভারত - ▁নে - প্র - ▁সেই - গা - ▁গা - হি - ন্ন - ▁ছ - ▁জন - ▁নির্ - খা - পি - ▁পে - ▁স্ - াব - ▁মো - ▁অনু - ▁কিন্তু - ৯ - ▁পরি - ▁ঢাকা - তার - লো - ▁বিষয় - ▁তাঁর - ৪ - র্থ - ▁অ্যা - ▁ঘটনা - ▁শেষ - ড়ি - লেন - ▁আমাদের - ▁বড় - দেশ - ▁নেই - ▁ব্যা - ানো - ▁বেশি - মার - বাস - ▁তবে - ▁কো - শি - ▁বিভিন্ন - ▁নয় - ৭ - নী - ৩ - ▁দল - ▁দেখা - ঝ - ▁করার - ▁কে - ▁হলে - ুক - ▁গু - ▁৩ - ৬ - ▁মনে - ▁নির্বাচন - ▁রাজ - ▁করেছে - ীন - লের - িতে - ▁একটা - ঞ্চ - ▁রাখ - ▁থাক - ▁আমরা - ▁চল - ২ - ▁কাছে - ▁মু - ▁পড় - ▁সহ - ▁হিসেবে - জ্ঞ - ান্ত - ণ্ড - ৎ - য়ের - ▁পু - ▁একজন - ▁বলেন - ুন - িং - ’ - ▁বাংলা - টার - ুম - ঞ্জ - ▁বাড়ি - ▁গত - ▁হাজার - ▁মতো - ডি - ▁তিন - দ্ধ - ▁এমন - ▁কয়েক - ▁কম - ত্ব - ্রা - ▁দিকে - ▁ছিলেন - ▁পড়ে - নার - ▁করি - কাল - ▁মুখ - ▁উঠ - র্ত - ▁টাকা - চার - শে - ▁এসে - ▁দুই - ▁করেছেন - ▁লোক - ম্প - ৮ - ষ্ঠ - ▁মহা - ▁কু - ▁থাকে - বাদ - চি - ▁এলাকা - ▁জানান - ▁প্রায় - ▁দেয়া - ▁গেল - য - চ্ছে - ▁ছবি - ▁নতুন - ▁অবস্থা - ▁অভি - ▁আজ - ▁কার - ▁খু - ▁জানা - ▁করছে - টির - ▁বাংলাদেশের - ▁বন্ধ - কারী - ▁অন্য - ▁ধরে - প্ত - ▁তাকে - ▁গেছে - ▁শি - চা - আ - ▁চাল - ▁আল - ▁৫ - ▁উত্ত - ▁ঝ - ▁জীবন - লার - ঙ - ▁প্রকাশ - ▁মেয়ে - ▁রে - ▁দেশের - ▁খেল - ▁মূল - ভি - ঙ্ক - ▁চি - ▁পর্যন্ত - ▁সাথে - লাম - ▁৪ - ▁টি - ▁বো - ▁আইন - গত - ▁হতে - ▁ভালো - . 
- স্ক - ▁অভিযোগ - ন্স - ▁কারণে - ▁অর্থ - ▁অপ - ক্স - বু - ▁২০ - ▁পাওয়া - ▁খুব - ▁মন - সম - ল্লা - ব্দ - ▁পি - ▁ওই - ▁করবে - য়ার - সহ - ক্ষণ - ▁নারী - ম্ব - ▁ফা - ▁বেশ - ▁পেয়ে - দে - ▁তখন - িয়ার - ▁ক্যা - ▁ছেলে - ▁চার - ভার - ▁দিতে - ▁ক্র - ▁গান - বাহিনী - ▁ভি - কৃত - ▁গো - বল - ▁ইসলাম - ▁জি - ▁ডি - ন্দ্র - ▁গ্রাম - ▁ওপর - ▁ভোট - ▁পাঠ - ▁গিয়ে - ▁মামলা - ▁ব্যবস্থা - সার - যুক্ত - ▁মাস - দার - ▁সেখানে - ▁জন্ম - ▁পদ - ▁কেউ - র্ণ - ▁দেওয়া - ভাগ - ▁১০ - ▁উদ্ - োয়া - রূপ - ▁ফেল - ▁তৈরি - ▁খবর - ▁কেন - ▁ভাষা - ▁৬ - ▁ভাব - ▁নেতা - ▁জানিয়েছে - ▁কী - ফা - ▁থাকা - ▁লি - টের - ▁ছা - ▁হল - ▁গ্র - ▁কর্ম - ▁সদস্য - ▁জাতীয় - ▁ব্র - দু - ▁কেন্দ্র - ▁হওয়ার - ▁দেব - ▁চলে - ▁হলো - তু - ▁বিশ্ব - ▁যাওয়া - ▁যাবে - ▁ট্র - ▁সম্পর্ক - ▁দিয়েছে - ▁যদি - ▁বিরুদ্ধে - ▁বিশেষ - ▁করলে - ▁ছোট - ▁অধি - ▁শুন - ▁আবার - ▁কারণ - ▁দলের - ▁ফি - ▁স্ট - ▁দেয় - ▁শিল্প - ▁রাজনৈতিক - ▁বলা - ▁ছাড়া - ▁জেলা - ▁দেখে - ▁প্রধান - ▁এসব - বন্ধ - ▁কর্মকর্তা - চ্ছি - ▁তথ্য - ▁অংশ - ▁দশ - ▁তাহা - মন্ত্রী - ৃত - ▁ঠিক - ▁রাত - ▁আসা - ▁থানা - ▁গোল - রাজ - ▁মৃত্যু - ▁রি - ▁পথ - ্যান - ▁বিচার - ▁শ্রমিক - ▁গল্প - ▁সকাল - ▁হাতে - ▁এটা - ▁কবি - ▁বাবা - ▁দাবি - ▁চাই - ▁মাধ্যমে - ▁হয়েছিল - ▁ঢ - ▁যাচ্ছে - ▁২০০ - ▁চলচ্চিত্র - ▁রহমান - ▁লেখা - ▁দেন - ▁পুরুষ - চিত্র - ▁ব্যবহার - ▁অনুষ্ঠান - ▁বর্তমান - ▁ধর্ম - ▁দাঁড় - ▁নিহত - ঃ - চ্ছ - ▁চেষ্টা - ▁চোখ - ▁উপজেলা - ▁আদালত - ▁সামনে - ▁রু - ▁চেয়ে - ▁সর্ব - ▁হত্যা - ▁গণ - ▁ডাক - ▁দ্বিতীয় - ▁ধরনের - ▁কবিতা - ▁ফলে - ▁সবচেয়ে - গুলি - ▁মোট - ▁পরিবার - ▁শিশু - ▁হোসেন - ▁রেখে - ▁রায় - ▁মাথা - ▁দুর্ - ▁৮ - ▁টা - ▁৭ - ▁বসে - ▁ওয়া - ▁ব্যক্তি - ▁শুধু - ▁ব্যাংক - ▁পাকিস্তান - ▁যখন - ▁করিয়া - ▁লিখ - পূর্ণ - ▁বিশ্ববিদ্যালয় - ▁সংখ্যা - ▁যুদ্ধ - ▁হইয়া - ▁ক্ষমতা - ▁সাধারণ - ▁কোটি - ▁শিক্ষা - ▁আলো - ▁তুলে - ▁সত্য - ▁ঘটে - '''' - ▁দূর - ▁প্রশ্ন - ুদ্ধ - ▁লাখ - ▁নিজের - েশন - ▁আলোচনা - ঈ - ▁ক্রিকেট - ▁সমাজ - ▁বয়স - ▁গ্রহণ - ▁জায়গা - ▁ব্যবসা - বর্তী - জীব - কল্প - ▁প্রত্য - ▁মাত্র - ▁উৎ - ▁শহরে - ▁এখানে - ▁নেয়া - ▁ঘোষণা - ▁সকল - ▁আটক - ▁নিরাপত্তা - ▁পাঁচ - ▁পূর্ব - ▁রাষ্ট্র - ▁ভাই - ▁বহু - ▁পরীক্ষা - ▁পুরো - ▁বাইরে - ▁থাকবে - ▁ক্ষেত্রে - ▁স্থান - ▁ম্যাচ - ▁ঘরে - ▁সবাই - ার্ড - ▁উদ্ধার - ▁ইতিহাস - ▁সাহিত্য - ▁সুযোগ - ▁আন্দোলন - ▁যুক্তরাষ্ট্র - দর্শন - ▁১২ - ▁১৮ - ▁প্রেম - ▁আন্তর্জাতিক - ল্যান্ড - ▁সমস্যা - ▁বিভাগ - ▁সিদ্ধান্ত - ▁মধ্য - ন্দি - ▁ছাত্র - ▁গাড়ি - ▁দীর্ঘ - ▁সংবাদ - ▁প্রয়োজন - ▁সিনেমা - ▁রাজধানী - ▁স্থানীয় - ▁একটু - ▁বাজার - জ্জ - ▁পৃথিবী - ▁বিশ্বাস - ▁আহত - ▁দায়িত্ব - ▁হরতাল - ▁সম্ভব - ▁অফিস - ▁অভিনয় - ▁কলেজ - ▁চট্টগ্রাম - ▁ক্ল - ▁দক্ষিণ - ▁পক্ষে - ▁মুক্তি - ▁সংসদ - ‘ - ▁উপস্থিত - ▁ফিরে - ▁আগামী - ▁সংগঠন - ▁মিনিট - ▁হামলা - ▁প্রতিষ্ঠান - ▁পোশাক - ▁প্ল - ▁সৃষ্টি - ▁কমিশন - ▁আমাকে - ▁তদন্ত - ▁উচ্চ - ▁রাজনীতি - দ্দ - ▁দর্শক - ▁তুমি - ▁পরিস্থিতি - াহার - ▁ক্ষতি - ▁আত্ম - ▁গ্রেপ্তার - ▁ফুট - ▁পাশাপাশি - মূল - ▁প্রধানমন্ত্রী - কর্মী - ▁সুন্দর - ▁নিয়ম - ▁আগুন - বিজ্ঞান - ▁সাংবাদিক - ▁লক্ষ্য - ▁অবশ্য - ▁শরীর - ▁উল্লেখ - ▁শতাংশ - ▁স্কুল - ভূত - ▁গ্রন্থ - ▁কখনো - ▁প্রাণ - ▁কারখানা - ▁হিন্দু - ▁বিবিসি - ▁আপনার - ▁আহমেদ - ▁স্ত্রী - বর্ষ - ▁শক্তি - সভা - ▁রাস্তা - ▁রকম - ▁পশ্চিম - ▁অপরাধ - ▁আসছে - ▁সংস্থা - ▁পৌঁছ - ▁দোকান - ▁পত্রিকা - ▁লেখক - ▁সন্তান - ▁ভেতর - ▁এগিয়ে - ▁নদী - ▁হইল - ▁পরিবেশ - ▁প্রেসিডেন্ট - ▁ছেড়ে - ▁চেয়ারম্যান - ▁ধারা - বৃত্ত - ▁বিক্রি - ▁শ্রী - ▁রক্ষা - ▁দ্রুত - ▁পরিচয় - ▁মালিক - ▁উপন্যাস - ▁শিক্ষার্থী - ▁অন্যতম - ▁চরিত্র - ▁প্রতিবেদন - ▁প্রস্তুত - ▁অভিযান - তন্ত্র - ▁অগ্নি - ▁জনগণ - ▁বৃহস্পতিবার - ▁ব্যাপক - ▁অনুযায়ী - ▁পরিবর্তন - ▁কলকাতা - ভূমি - ▁নজরুল - ▁ভূমিকা - ▁জনপ্রিয় - ▁শিক্ষক - ▁তেমন - ▁অন্যান্য - ▁বিদ্যুৎ - খ্যাত - ▁অস্ত্র - ▁প্রস্তাব - ▁স্বামী - ▁পরিচিত - ▁আয়োজন - 
▁শনিবার - ▁তাঁকে - ▁যাত্রী - প্রাপ্ত - ▁কর্মসূচি - ▁গঠন - ▁প্রভাব - ▁কৃষ্ণ - ▁সমাবেশ - ▁সূত্র - ▁অনুষ্ঠিত - ▁পর্যায়ে - ঋ - ▁পুরস্কার - ▁বিক্ষোভ - ▁নিয়ন্ত্রণ - ▁রোববার - ▁প্রার্থী - ▁যোগাযোগ - ▁সোমবার - ▁মার্চ - ▁কমিটি - ▁সংঘর্ষ - ▁বুধবার - ▁সামাজিক - ▁তাঁদের - ▁মার্কিন - ▁সামরিক - ▁নিজেদের - ▁মঙ্গলবার - ▁বক্তব্য - ▁চুক্তি - ▁যুগ - ▁বৈঠক - ▁ইউনিয়ন - ▁মোহাম্মদ - অ - ▁তাঁহার - ▁নির্মাণ - ▁জানুয়ারি - ▁আবেদন - ▁বিশ্বকাপ - ▁ফেব্রুয়ারি - ▁তরুণ - ▁হিসাব - ▁সন্ধ্যা - ▁পরিকল্পনা - ▁উইকেট - ▁ধারণা - ▁আনন্দ - মুক্ত - ▁উদ্দেশ্য - ▁চিকিৎসা - ▁উন্নয়ন - ▁আধুনিক - ▁ভিত্তি - ':' - "\x94" - ঢ - ‍ - ় - e - / - i - r - t - o - '%' - l - a - n - '!' - p - '"' - s - '?' - d - '0' - '3' - u - ঞ - f - g - c - m - h - – - w - b - ; - x - '8' - '5' - '9' - k - ” - y - H - L - T - j - ৗ - B - K - _ - z - “ - F - v - '4' - '1' - '2' - ঔ - ঊ - "\x93" - D - O - œ - ঐ - ৰ - — - <sos/eos> init: null input_size: null ctc_conf: dropout_rate: 0.0 ctc_type: builtin reduce: true ignore_nan_grad: true joint_net_conf: null model_conf: ctc_weight: 0.5 use_preprocessor: true token_type: bpe bpemodel: data/token_list/bpe_unigram1000/bpe.model non_linguistic_symbols: null cleaner: null g2p: null speech_volume_normalize: null rir_scp: null rir_apply_prob: 1.0 noise_scp: null noise_apply_prob: 1.0 noise_db_range: '13_15' frontend: default frontend_conf: fs: 16k specaug: specaug specaug_conf: apply_time_warp: true time_warp_window: 5 time_warp_mode: bicubic apply_freq_mask: true freq_mask_width_range: - 0 - 27 num_freq_mask: 2 apply_time_mask: true time_mask_width_ratio_range: - 0.0 - 0.05 num_time_mask: 2 normalize: global_mvn normalize_conf: stats_file: exp/asr_stats_raw_bpe1000/train/feats_stats.npz preencoder: null preencoder_conf: {} encoder: vgg_rnn encoder_conf: rnn_type: lstm bidirectional: true use_projection: true num_layers: 4 hidden_size: 1024 output_size: 1024 postencoder: null postencoder_conf: {} decoder: rnn decoder_conf: num_layers: 2 hidden_size: 1024 sampling_probability: 0 att_conf: atype: location adim: 1024 aconv_chans: 10 aconv_filts: 100 required: - output_dir - token_list version: 0.10.6a1 distributed: false ``` </details> ### Citing ESPnet ```BibTex @inproceedings{watanabe2018espnet, author={Shinji Watanabe and Takaaki Hori and Shigeki Karita and Tomoki Hayashi and Jiro Nishitoba and Yuya Unno and Nelson Yalta and Jahn Heymann and Matthew Wiesner and Nanxin Chen and Adithya Renduchintala and Tsubasa Ochiai}, title={{ESPnet}: End-to-End Speech Processing Toolkit}, year={2018}, booktitle={Proceedings of Interspeech}, pages={2207--2211}, doi={10.21437/Interspeech.2018-1456}, url={http://dx.doi.org/10.21437/Interspeech.2018-1456} } ``` or arXiv: ```bibtex @misc{watanabe2018espnet, title={ESPnet: End-to-End Speech Processing Toolkit}, author={Shinji Watanabe and Takaaki Hori and Shigeki Karita and Tomoki Hayashi and Jiro Nishitoba and Yuya Unno and Nelson Yalta and Jahn Heymann and Matthew Wiesner and Nanxin Chen and Adithya Renduchintala and Tsubasa Ochiai}, year={2018}, eprint={1804.00015}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
ArBert/roberta-base-finetuned-ner-agglo
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - generated_from_trainer datasets: - image_folder metrics: - accuracy model-index: - name: orchid219_ft_vit-large-patch16-224-in21k-finetuned-eurosat results: - task: name: Image Classification type: image-classification dataset: name: image_folder type: image_folder args: default metrics: - name: Accuracy type: accuracy value: 0.9230769230769231 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # orchid219_ft_vit-large-patch16-224-in21k-finetuned-eurosat This model is a fine-tuned version of [gary109/orchid219_ft_vit-large-patch16-224-in21k](https://huggingface.co/gary109/orchid219_ft_vit-large-patch16-224-in21k) on the image_folder dataset. It achieves the following results on the evaluation set: - Loss: 0.9545 - Accuracy: 0.9231 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 10 - eval_batch_size: 10 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 40 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 3.5728 | 0.96 | 17 | 2.1936 | 0.8718 | | 1.6005 | 1.96 | 34 | 1.2044 | 0.9359 | | 0.9764 | 2.96 | 51 | 0.9545 | 0.9231 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
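A minimal inference sketch for this image classifier. The repository name is a placeholder — the card does not state the repo the checkpoint was pushed to:

```python
import torch
from PIL import Image
from transformers import AutoFeatureExtractor, AutoModelForImageClassification

model_id = "<user>/orchid219_ft_vit-large-patch16-224-in21k-finetuned-eurosat"  # placeholder
extractor = AutoFeatureExtractor.from_pretrained(model_id)
model = AutoModelForImageClassification.from_pretrained(model_id)

image = Image.open("orchid.jpg")
inputs = extractor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
predicted = logits.argmax(-1).item()
print(model.config.id2label[predicted])
```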
ArBert/roberta-base-finetuned-ner-gmm-twitter
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy - f1 - precision - recall model-index: - name: finetuning-sentiment-analysis-en-id results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuning-sentiment-analysis-en-id This model is a fine-tuned version of [distilbert-base-multilingual-cased](https://huggingface.co/distilbert-base-multilingual-cased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.1654 - Accuracy: 0.9527 - F1: 0.9646 - Precision: 0.9641 - Recall: 0.9652 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Precision | Recall | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:---------:|:------:| | 0.4566 | 1.0 | 1602 | 0.3666 | 0.8473 | 0.8909 | 0.8530 | 0.9323 | | 0.3458 | 2.0 | 3204 | 0.2193 | 0.9238 | 0.9432 | 0.9410 | 0.9454 | | 0.2362 | 3.0 | 4806 | 0.1654 | 0.9527 | 0.9646 | 0.9641 | 0.9652 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
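The model name and its multilingual DistilBERT base suggest English/Indonesian sentiment analysis, so a quick bilingual sanity-check sketch may help — the repository name is a placeholder and the label mapping is not documented in the card:

```python
from transformers import pipeline

sentiment = pipeline("text-classification", model="<user>/finetuning-sentiment-analysis-en-id")  # placeholder
print(sentiment([
    "The delivery was fast and the product works great.",  # English
    "Pelayanannya cepat dan barangnya bagus sekali.",       # Indonesian
]))
```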
Araby/Arabic-TTS
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-05-23T03:07:50Z
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: wav2vec2-xlsr-mn-eng results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-xlsr-mn-eng This model is a fine-tuned version of [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on the IEMOCAP and Common Voice's MN dataset. Can be used to recognize speech on ENG and MN simultaneously. It achieves the following results on the evaluation set: - Loss: 0.3087 - Wer: 0.3402 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 2 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1000 - num_epochs: 5 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:-----:|:---------------:|:------:| | 8.8609 | 0.08 | 500 | 3.6078 | 1.0 | | 3.5494 | 0.15 | 1000 | 3.2044 | 1.0 | | 3.1699 | 0.23 | 1500 | 3.1560 | 1.0 | | 3.0955 | 0.3 | 2000 | 3.1087 | 1.0 | | 2.7918 | 0.38 | 2500 | 2.1146 | 1.0236 | | 2.0528 | 0.45 | 3000 | 1.4938 | 0.9648 | | 1.6329 | 0.53 | 3500 | 1.2614 | 0.9198 | | 1.3932 | 0.6 | 4000 | 1.0504 | 0.8314 | | 1.2652 | 0.68 | 4500 | 0.9664 | 0.7809 | | 1.1829 | 0.76 | 5000 | 0.8999 | 0.7381 | | 1.1674 | 0.83 | 5500 | 0.8200 | 0.6924 | | 1.0599 | 0.91 | 6000 | 0.7713 | 0.6729 | | 1.027 | 0.98 | 6500 | 0.7714 | 0.6616 | | 0.9289 | 1.06 | 7000 | 0.7571 | 0.6433 | | 0.9192 | 1.13 | 7500 | 0.6899 | 0.6151 | | 0.8996 | 1.21 | 8000 | 0.7012 | 0.6104 | | 0.9281 | 1.28 | 8500 | 0.6452 | 0.5914 | | 0.8656 | 1.36 | 9000 | 0.6162 | 0.5781 | | 0.8635 | 1.44 | 9500 | 0.6249 | 0.5672 | | 0.8388 | 1.51 | 10000 | 0.5936 | 0.5558 | | 0.8087 | 1.59 | 10500 | 0.5844 | 0.5466 | | 0.7755 | 1.66 | 11000 | 0.5838 | 0.5364 | | 0.8377 | 1.74 | 11500 | 0.5358 | 0.5202 | | 0.8308 | 1.81 | 12000 | 0.5333 | 0.5196 | | 0.7775 | 1.89 | 12500 | 0.5129 | 0.5060 | | 0.7747 | 1.96 | 13000 | 0.5164 | 0.5096 | | 0.7115 | 2.04 | 13500 | 0.5056 | 0.4936 | | 0.6974 | 2.12 | 14000 | 0.4925 | 0.4878 | | 0.6672 | 2.19 | 14500 | 0.5030 | 0.4908 | | 0.6396 | 2.27 | 15000 | 0.4821 | 0.4686 | | 0.6943 | 2.34 | 15500 | 0.4693 | 0.4624 | | 0.6413 | 2.42 | 16000 | 0.4626 | 0.4636 | | 0.6446 | 2.49 | 16500 | 0.4513 | 0.4609 | | 0.6338 | 2.57 | 17000 | 0.4386 | 0.4524 | | 0.6208 | 2.65 | 17500 | 0.4360 | 0.4445 | | 0.6397 | 2.72 | 18000 | 0.4348 | 0.4355 | | 0.6127 | 2.8 | 18500 | 0.4367 | 0.4318 | | 0.5956 | 2.87 | 19000 | 0.4376 | 0.4322 | | 0.6345 | 2.95 | 19500 | 0.4050 | 0.4308 | | 0.572 | 3.02 | 20000 | 0.4211 | 0.4219 | | 0.5447 | 3.1 | 20500 | 0.4042 | 0.4112 | | 0.5323 | 3.17 | 21000 | 0.4101 | 0.4153 | | 0.5677 | 3.25 | 21500 | 0.3952 | 0.4188 | | 0.5354 | 3.33 | 22000 | 0.3889 | 0.4007 | | 0.5297 | 3.4 | 22500 | 0.3793 | 0.3997 | | 0.5314 | 3.48 | 23000 | 0.3684 | 0.3956 | | 0.5217 | 3.55 | 23500 | 0.3572 | 0.3853 | | 0.5224 | 3.63 | 24000 | 0.3535 | 0.3867 | | 0.4983 | 3.7 | 24500 | 0.3636 | 0.3804 | | 0.5355 | 3.78 | 25000 | 0.3680 | 0.3770 | | 0.5115 | 3.85 | 25500 | 0.3472 | 0.3752 | | 0.5416 | 3.93 | 26000 | 0.3280 | 0.3689 | | 
0.5104 | 4.01 | 26500 | 0.3319 | 0.3650 | | 0.4524 | 4.08 | 27000 | 0.3453 | 0.3632 | | 0.462 | 4.16 | 27500 | 0.3359 | 0.3600 | | 0.4823 | 4.23 | 28000 | 0.3268 | 0.3553 | | 0.4671 | 4.31 | 28500 | 0.3248 | 0.3535 | | 0.4702 | 4.38 | 29000 | 0.3278 | 0.3501 | | 0.483 | 4.46 | 29500 | 0.3183 | 0.3492 | | 0.4232 | 4.53 | 30000 | 0.3224 | 0.3470 | | 0.4227 | 4.61 | 30500 | 0.3171 | 0.3458 | | 0.4687 | 4.69 | 31000 | 0.3121 | 0.3537 | | 0.4486 | 4.76 | 31500 | 0.3088 | 0.3424 | | 0.4459 | 4.84 | 32000 | 0.3101 | 0.3407 | | 0.4513 | 4.91 | 32500 | 0.3077 | 0.3407 | | 0.4237 | 4.99 | 33000 | 0.3087 | 0.3402 | ### Framework versions - Transformers 4.19.2 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
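A minimal transcription sketch using the ASR pipeline. The repository name is a placeholder, and `ffmpeg` must be available so the pipeline can decode the audio files:

```python
from transformers import pipeline

asr = pipeline("automatic-speech-recognition", model="<user>/wav2vec2-xlsr-mn-eng")  # placeholder
# The card states the model transcribes both English and Mongolian speech.
print(asr("sample_english.wav"))
print(asr("sample_mongolian.wav"))
```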
Aracatto/Catto
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
# Introduction

See https://github.com/k2-fsa/icefall/pull/330
Araf/Ummah
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - zh inference: parameters: temperature: 0.7 top_p: 0.6 repetition_penalty: 1.1 max_new_tokens: 128 num_return_sequences: 3 do_sample: true license: apache-2.0 tags: - generate - gpt2 widget: - 北京是中国的 - 西湖的景色 ---

# Wenzhong-GPT2-110M

- Github: [Fengshenbang-LM](https://github.com/IDEA-CCNL/Fengshenbang-LM)
- Docs: [Fengshenbang-Docs](https://fengshenbang-doc.readthedocs.io/)

## 简介 Brief Introduction

善于处理NLG任务，中文版的GPT2-Small。

Focused on handling NLG tasks, Chinese GPT2-Small.

## 模型分类 Model Taxonomy

| 需求 Demand | 任务 Task | 系列 Series | 模型 Model | 参数 Parameter | 额外 Extra |
| :----: | :----: | :----: | :----: | :----: | :----: |
| 通用 General | 自然语言生成 NLG | 闻仲 Wenzhong | GPT2 | 110M | 中文 Chinese |

## 模型信息 Model Information

类似于Wenzhong2.0-GPT2-3.5B-chinese，我们实现了一个small版本的12层的Wenzhong-GPT2-110M，并且在悟道（300G版本）上面进行预训练。

Similar to Wenzhong2.0-GPT2-3.5B-chinese, we implement a small size Wenzhong-GPT2-110M with 12 layers, which is pre-trained on Wudao Corpus (300G version).

## 使用 Usage

### 加载模型 Loading Models

```python
from transformers import GPT2Tokenizer, GPT2LMHeadModel

hf_model_path = 'IDEA-CCNL/Wenzhong-GPT2-110M'
tokenizer = GPT2Tokenizer.from_pretrained(hf_model_path)
model = GPT2LMHeadModel.from_pretrained(hf_model_path)
```

### 使用示例 Usage Examples

```python
question = "北京是中国的"
inputs = tokenizer(question, return_tensors='pt')
generation_output = model.generate(**inputs,
                                   return_dict_in_generate=True,
                                   output_scores=True,
                                   max_length=150,
                                   # max_new_tokens=80,
                                   do_sample=True,
                                   top_p=0.6,
                                   # num_beams=5,
                                   eos_token_id=50256,
                                   pad_token_id=0,
                                   num_return_sequences=5)

for idx, sentence in enumerate(generation_output.sequences):
    print('next sentence %d:\n' % idx,
          tokenizer.decode(sentence).split('<|endoftext|>')[0])
    print('*' * 40)
```

## 引用 Citation

如果您在您的工作中使用了我们的模型，可以引用我们的[论文](https://arxiv.org/abs/2209.02970):

If you are using the resource for your work, please cite our [paper](https://arxiv.org/abs/2209.02970):

```text
@article{fengshenbang,
  author  = {Jiaxing Zhang and Ruyi Gan and Junjie Wang and Yuxiang Zhang and Lin Zhang and Ping Yang and Xinyu Gao and Ziwei Wu and Xiaoqun Dong and Junqing He and Jianheng Zhuo and Qi Yang and Yongfeng Huang and Xiayu Li and Yanghan Wu and Junyu Lu and Xinyu Zhu and Weifeng Chen and Ting Han and Kunhao Pan and Rui Wang and Hao Wang and Xiaojun Wu and Zhongshen Zeng and Chongpei Chen},
  title   = {Fengshenbang 1.0: Being the Foundation of Chinese Cognitive Intelligence},
  journal = {CoRR},
  volume  = {abs/2209.02970},
  year    = {2022}
}
```

也可以引用我们的[网站](https://github.com/IDEA-CCNL/Fengshenbang-LM/):

You can also cite our [website](https://github.com/IDEA-CCNL/Fengshenbang-LM/):

```text
@misc{Fengshenbang-LM,
  title={Fengshenbang-LM},
  author={IDEA-CCNL},
  year={2021},
  howpublished={\url{https://github.com/IDEA-CCNL/Fengshenbang-LM}},
}
```
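For completeness, the same checkpoint can typically also be driven through the high-level `text-generation` pipeline in `transformers`. The snippet below is an illustrative sketch added here, not part of the original card; the sampling parameters simply mirror the inference settings declared in the card's front matter (temperature 0.7, top_p 0.6, repetition_penalty 1.1).

```python
# Illustrative sketch (not from the original card): the same model via the
# high-level text-generation pipeline, reusing the card's sampling settings.
from transformers import pipeline

generator = pipeline("text-generation", model="IDEA-CCNL/Wenzhong-GPT2-110M")

outputs = generator(
    "北京是中国的",
    max_new_tokens=128,
    do_sample=True,
    temperature=0.7,
    top_p=0.6,
    repetition_penalty=1.1,
    num_return_sequences=3,
    pad_token_id=0,  # matches the pad_token_id used in the card's generate example
)
for out in outputs:
    print(out["generated_text"])
```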
Aran/DialoGPT-medium-harrypotter
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
2022-05-23T03:36:04Z
# Introduction

See https://github.com/k2-fsa/icefall/pull/330

No random combiner inside.

Tensorboard log: https://tensorboard.dev/experiment/VKoVx6IZTBuGCJN9kt72BQ/
ArashEsk95/bert-base-uncased-finetuned-cola
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
# Introduction

See https://github.com/k2-fsa/icefall/pull/330

No random combiner inside.

Tensorboard logs: https://tensorboard.dev/experiment/vZGRckYUR4eNjnBJ9AOEkg/