Dataset columns:
- modelId: string (length 4–81)
- tags: sequence
- pipeline_tag: string (17 classes)
- config: dict
- downloads: int64 (range 0–59.7M)
- first_commit: timestamp[ns, tz=UTC]
- card: string (length 51–438k)
DoyyingFace/bert-asian-hate-tweets-asian-unclean-with-clean-valid
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
33
null
--- tags: - opt_metasq --- # This repo lets you run the following checkpoint using facebookresearch/metaseq. Do the following: ## 1. Install PyTorch ``` pip3 install torch==1.10.1+cu113 torchvision==0.11.2+cu113 torchaudio==0.10.1+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html ``` ## 2. Install Megatron ``` git clone https://github.com/patrickvonplaten/Megatron-LM.git cd Megatron-LM pip3 install six regex pip3 install -e . ``` ## 3. Install fairscale ``` git clone https://github.com/facebookresearch/fairscale.git cd fairscale git checkout prefetch_fsdp_params_simple pip3 install -e . ``` ## 4. Install metaseq ``` git clone https://github.com/patrickvonplaten/metaseq.git cd metaseq pip3 install -e . ``` ## 5. Clone this repo (click top right on "How to clone") ## 6. Run the following: ```bash cd <path/to/cloned/repo> bash run.sh ```
DoyyingFace/bert-asian-hate-tweets-asonam-clean
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
--- tags: - opt_metasq --- # This repo lets you run the following checkpoint using facebookresearch/metaseq. Do the following: ## 1. Install PyTorch ``` pip3 install torch==1.10.1+cu113 torchvision==0.11.2+cu113 torchaudio==0.10.1+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html ``` ## 2. Install Megatron ``` git clone https://github.com/patrickvonplaten/Megatron-LM.git cd Megatron-LM pip3 install six regex pip3 install -e . ``` ## 3. Install fairscale ``` git clone https://github.com/facebookresearch/fairscale.git cd fairscale git checkout prefetch_fsdp_params_simple pip3 install -e . ``` ## 4. Install metaseq ``` git clone https://github.com/patrickvonplaten/metaseq.git cd metaseq pip3 install -e . ``` ## 5. Clone this repo (click top right on "How to clone") ## 6. Run the following: ```bash cd <path/to/cloned/repo> bash run.sh ```
DoyyingFace/bert-asian-hate-tweets-asonam-unclean
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: -25.21 +/- 80.62 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
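A minimal usage sketch with Stable-Baselines3 and the `huggingface_sb3` helper is shown below. The repository id and checkpoint filename are placeholders (they are not stated in this card), so replace them with the actual values for this repo.

```python
# Minimal sketch, assuming the checkpoint was uploaded with huggingface_sb3.
# "<user>/<this-repo>" and "ppo-LunarLander-v2.zip" are placeholders, not confirmed values.
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

# Download the checkpoint from the Hugging Face Hub and load it.
checkpoint = load_from_hub(repo_id="<user>/<this-repo>", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)

# Evaluate the agent on LunarLander-v2 (requires gym[box2d]).
env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```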
albert-base-v1
[ "pytorch", "tf", "safetensors", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "exbert", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
38,156
2022-05-11T07:49:27Z
--- license: mit tags: - generated_from_trainer metrics: - rouge model-index: - name: bart-cnn-pubmed-arxiv-pubmed-arxiv-arxiv results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bart-cnn-pubmed-arxiv-pubmed-arxiv-arxiv This model is a fine-tuned version of [theojolliffe/bart-cnn-pubmed-arxiv-pubmed-arxiv](https://huggingface.co/theojolliffe/bart-cnn-pubmed-arxiv-pubmed-arxiv) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.8065 - Rouge1: 54.5916 - Rouge2: 36.7817 - Rougel: 40.4708 - Rougelsum: 52.5754 - Gen Len: 142.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:--------:| | 1.2945 | 1.0 | 795 | 0.9555 | 51.91 | 32.0926 | 33.6727 | 49.5306 | 142.0 | | 0.7153 | 2.0 | 1590 | 0.8317 | 52.4708 | 34.1035 | 35.2968 | 50.2966 | 141.963 | | 0.5398 | 3.0 | 2385 | 0.8133 | 52.4603 | 33.497 | 36.4227 | 50.2513 | 141.8704 | | 0.3568 | 4.0 | 3180 | 0.8091 | 52.3993 | 34.2424 | 37.7819 | 50.2069 | 142.0 | | 0.2842 | 5.0 | 3975 | 0.8065 | 54.5916 | 36.7817 | 40.4708 | 52.5754 | 142.0 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0+cu113 - Datasets 2.2.0 - Tokenizers 0.12.1
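Since the usage sections above are placeholders, here is a minimal inference sketch. The repository id is assumed from the model name and base checkpoint and may differ; the length settings are illustrative only.

```python
# Minimal sketch; the repo id below is an assumption based on the model name.
from transformers import pipeline

summarizer = pipeline("summarization", model="theojolliffe/bart-cnn-pubmed-arxiv-pubmed-arxiv-arxiv")

article = "Replace this string with the report or article you want to summarize."
result = summarizer(article, max_length=142, min_length=30, do_sample=False)
print(result[0]["summary_text"])
```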
albert-base-v2
[ "pytorch", "tf", "jax", "rust", "safetensors", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4,785,283
2022-05-11T07:51:14Z
Fine-tuned recobo/agriculture-bert-uncased for custom NER entities.
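A minimal inference sketch for such a fine-tuned NER model is shown below; the repository id, example sentence, and aggregation setting are assumptions, not values taken from this card.

```python
# Minimal sketch; "<user>/<this-repo>" is a placeholder for the actual repository id.
from transformers import pipeline

ner = pipeline("token-classification", model="<user>/<this-repo>", aggregation_strategy="simple")
# Group sub-word tokens into whole entities and print the predicted spans.
print(ner("Wheat yields in Punjab fell after a late monsoon and a fall armyworm outbreak."))
```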
albert-xlarge-v1
[ "pytorch", "tf", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
341
2022-05-11T08:13:39Z
--- language: id license: apache-2.0 tags: - audio-classification - generated_from_trainer metrics: - accuracy - f1 model-index: - name: wav2vec2-xls-r-adult-child-id-cls results: [] --- # Wav2Vec2 XLS-R Adult/Child Indonesian Speech Classifier Wav2Vec2 XLS-R Adult/Child Indonesian Speech Classifier is an audio classification model based on the [XLS-R](https://arxiv.org/abs/2111.09296) architecture. This model is a fine-tuned version of [wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on a private adult/child Indonesian speech classification dataset. This model was trained using HuggingFace's PyTorch framework. All training was done on a Tesla P100, provided by Kaggle. Training metrics were logged via Tensorboard. ## Model | Model | #params | Arch. | Training/Validation data (text) | | ----------------------------------- | ------- | ----- | ---------------------------------------------------- | | `wav2vec2-xls-r-adult-child-id-cls` | 300M | XLS-R | Adult/Child Indonesian Speech Classification Dataset | ## Evaluation Results The model achieves the following results on evaluation: | Dataset | Loss | Accuracy | F1 | | -------------------------------------------- | ------ | -------- | ------ | | Adult/Child Indonesian Speech Classification | 0.1970 | 93.38% | 0.9307 | ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - `learning_rate`: 3e-05 - `train_batch_size`: 8 - `eval_batch_size`: 8 - `seed`: 42 - `gradient_accumulation_steps`: 4 - `total_train_batch_size`: 32 - `optimizer`: Adam with `betas=(0.9,0.999)` and `epsilon=1e-08` - `lr_scheduler_type`: linear - `lr_scheduler_warmup_ratio`: 0.1 - `num_epochs`: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | | :-----------: | :---: | :--: | :-------------: | :------: | :----: | | 0.336 | 1.0 | 305 | 0.3146 | 0.8845 | 0.8698 | | 0.2345 | 2.0 | 610 | 0.2140 | 0.9251 | 0.9202 | | 0.3215 | 3.0 | 915 | 0.2038 | 0.9315 | 0.9286 | | 0.2059 | 4.0 | 1220 | 0.1970 | 0.9338 | 0.9307 | ## Disclaimer Do consider the biases which came from pre-training datasets that may be carried over into the results of this model. ## Authors Wav2Vec2 XLS-R Adult/Child Indonesian Speech Classifier was trained and evaluated by [Ananto Joyoadikusumo](https://anantoj.github.io/). All computation and development are done on Kaggle. ## Framework versions - Transformers 4.18.0 - Pytorch 1.11.0+cu102 - Datasets 2.2.0 - Tokenizers 0.12.1
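A minimal inference sketch is given below; the repository id and the audio file path are placeholders (the exact repo id is not stated in this card), and the audio pipeline needs `ffmpeg` available to decode the file.

```python
# Minimal sketch; the repo id and audio path are placeholders, not confirmed values.
from transformers import pipeline

classifier = pipeline("audio-classification", model="<user>/wav2vec2-xls-r-adult-child-id-cls")
# Returns the predicted labels (e.g. adult vs. child speech) with confidence scores.
print(classifier("speech_sample.wav"))
```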
albert-xlarge-v2
[ "pytorch", "tf", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2,973
2022-05-11T08:16:03Z
--- language: - zh license: apache-2.0 tags: - bert - NLU - Sentiment inference: true widget: - text: "今天心情不好" --- # Erlangshen-MegatronBert-1.3B-Sentiment - Github: [Fengshenbang-LM](https://github.com/IDEA-CCNL/Fengshenbang-LM) - Docs: [Fengshenbang-Docs](https://fengshenbang-doc.readthedocs.io/) ## 简介 Brief Introduction 2021年登顶FewCLUE和ZeroCLUE的中文BERT,在数个情感分析任务微调后的版本 This is the Chinese BERT model that topped the FewCLUE and ZeroCLUE benchmarks in 2021, fine-tuned on several sentiment analysis datasets. ## 模型分类 Model Taxonomy | 需求 Demand | 任务 Task | 系列 Series | 模型 Model | 参数 Parameter | 额外 Extra | | :----: | :----: | :----: | :----: | :----: | :----: | | 通用 General | 自然语言理解 NLU | 二郎神 Erlangshen | MegatronBert | 1.3B | 情感分析 Sentiment | ## 模型信息 Model Information 基于[Erlangshen-MegatronBert-1.3B](https://huggingface.co/IDEA-CCNL/Erlangshen-MegatronBert-1.3B),我们在收集的8个中文领域的情感分析数据集,总计227347个样本上微调了一个Sentiment版本。 Based on [Erlangshen-MegatronBert-1.3B](https://huggingface.co/IDEA-CCNL/Erlangshen-MegatronBert-1.3B), we fine-tuned a sentiment analysis version on 8 Chinese sentiment analysis datasets, totaling 227,347 samples. ### 下游效果 Performance | 模型 Model | ASAP-SENT | ASAP-ASPECT | ChnSentiCorp | | :--------: | :-----: | :----: | :-----: | | Erlangshen-Roberta-110M-Sentiment | 97.77 | 97.31 | 96.61 | | Erlangshen-Roberta-330M-Sentiment | 97.9 | 97.51 | 96.66 | | Erlangshen-MegatronBert-1.3B-Sentiment | 98.1 | 97.8 | 97 | ## 使用 Usage ``` python from transformers import AutoModelForSequenceClassification from transformers import BertTokenizer import torch tokenizer=BertTokenizer.from_pretrained('IDEA-CCNL/Erlangshen-MegatronBert-1.3B-Sentiment') model=AutoModelForSequenceClassification.from_pretrained('IDEA-CCNL/Erlangshen-MegatronBert-1.3B-Sentiment') text='今天心情不好' output=model(torch.tensor([tokenizer.encode(text)])) print(torch.nn.functional.softmax(output.logits,dim=-1)) ``` ## 引用 Citation 如果您在您的工作中使用了我们的模型,可以引用我们的[论文](https://arxiv.org/abs/2209.02970): If you are using the resource for your work, please cite our [paper](https://arxiv.org/abs/2209.02970): ```text @article{fengshenbang, author = {Jiaxing Zhang and Ruyi Gan and Junjie Wang and Yuxiang Zhang and Lin Zhang and Ping Yang and Xinyu Gao and Ziwei Wu and Xiaoqun Dong and Junqing He and Jianheng Zhuo and Qi Yang and Yongfeng Huang and Xiayu Li and Yanghan Wu and Junyu Lu and Xinyu Zhu and Weifeng Chen and Ting Han and Kunhao Pan and Rui Wang and Hao Wang and Xiaojun Wu and Zhongshen Zeng and Chongpei Chen}, title = {Fengshenbang 1.0: Being the Foundation of Chinese Cognitive Intelligence}, journal = {CoRR}, volume = {abs/2209.02970}, year = {2022} } ``` 也可以引用我们的[网站](https://github.com/IDEA-CCNL/Fengshenbang-LM/): You can also cite our [website](https://github.com/IDEA-CCNL/Fengshenbang-LM/): ```text @misc{Fengshenbang-LM, title={Fengshenbang-LM}, author={IDEA-CCNL}, year={2021}, howpublished={\url{https://github.com/IDEA-CCNL/Fengshenbang-LM}}, } ```
albert-xxlarge-v2
[ "pytorch", "tf", "safetensors", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "exbert", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
42,640
2022-05-11T08:25:17Z
--- language: en inference: false tags: - text-generation - opt license: other commercial: false --- # OPT: Open Pre-trained Transformer Language Models OPT was first introduced in [Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) and first released in [metaseq's repository](https://github.com/facebookresearch/metaseq) on May 3rd 2022 by Meta AI. **Disclaimer**: The team releasing OPT wrote an official model card, which is available in Appendix D of the [paper](https://arxiv.org/pdf/2205.01068.pdf). Content from **this** model card has been written by the Hugging Face team. ## Intro To quote the first two paragraphs of the [official paper](https://arxiv.org/abs/2205.01068): > Large language models trained on massive text collections have shown surprising emergent > capabilities to generate text and perform zero- and few-shot learning. While in some cases the public > can interact with these models through paid APIs, full model access is currently limited to only a > few highly resourced labs. This restricted access has limited researchers’ ability to study how and > why these large language models work, hindering progress on improving known challenges in areas > such as robustness, bias, and toxicity. > We present Open Pretrained Transformers (OPT), a suite of decoder-only pre-trained transformers ranging from 125M > to 175B parameters, which we aim to fully and responsibly share with interested researchers. We train the OPT models to roughly match > the performance and sizes of the GPT-3 class of models, while also applying the latest best practices in data > collection and efficient training. Our aim in developing this suite of OPT models is to enable reproducible and responsible research at scale, and > to bring more voices to the table in studying the impact of these LLMs. Definitions of risk, harm, bias, and toxicity, etc., should be articulated by the > collective research community as a whole, which is only possible when models are available for study. ## Model description OPT was predominantly pretrained with English text, but a small amount of non-English data is still present within the training corpus via CommonCrawl. The model was pretrained using a causal language modeling (CLM) objective. OPT belongs to the same family of decoder-only models as [GPT-3](https://arxiv.org/abs/2005.14165). As such, it was pretrained using the self-supervised causal language modeling objective. For evaluation, OPT follows [GPT-3](https://arxiv.org/abs/2005.14165) by using their prompts and overall experimental setup. For more details, please read the [official paper](https://arxiv.org/abs/2205.01068). ## Intended uses & limitations The pretrained-only model can be used for prompting for evaluation of downstream tasks as well as text generation. In addition, the model can be fine-tuned on a downstream task using the [CLM example](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling). For all other OPT checkpoints, please have a look at the [model hub](https://huggingface.co/models?filter=opt). ### How to use You can use this model directly with a pipeline for text generation. ```python >>> from transformers import pipeline >>> generator = pipeline('text-generation', model="facebook/opt-125m") >>> generator("Hello, I'm am conscious and") [{'generated_text': 'Hello, I am conscious and aware of the fact that I am a woman. I am aware of'}] ``` By default, generation is deterministic. 
In order to use top-k sampling, please set `do_sample` to `True`. ```python >>> from transformers import pipeline, set_seed >>> set_seed(32) >>> generator = pipeline('text-generation', model="facebook/opt-125m", do_sample=True) >>> generator("Hello, I'm am conscious and") [{'generated_text': 'Hello, I am conscious and active member of the Khaosan Group, a private, self'}] ``` ### Limitations and bias As mentioned in Meta AI's model card, given that the training data used for this model contains a lot of unfiltered content from the internet, which is far from neutral, the model is strongly biased: > Like other large language models for which the diversity (or lack thereof) of training > data induces downstream impact on the quality of our model, OPT-175B has limitations in terms > of bias and safety. OPT-175B can also have quality issues in terms of generation diversity and > hallucination. In general, OPT-175B is not immune from the plethora of issues that plague modern > large language models. This bias will also affect all fine-tuned versions of this model. ## Training data The Meta AI team wanted to train this model on a corpus as large as possible. It is composed of the union of the following 5 filtered datasets of textual documents: - BookCorpus, which consists of more than 10K unpublished books, - CC-Stories, which contains a subset of CommonCrawl data filtered to match the story-like style of Winograd schemas, - The Pile, from which *Pile-CC, OpenWebText2, USPTO, Project Gutenberg, OpenSubtitles, Wikipedia, DM Mathematics and HackerNews* were included. - Pushshift.io Reddit dataset that was developed in Baumgartner et al. (2020) and processed in Roller et al. (2021) - CCNewsV2 containing an updated version of the English portion of the CommonCrawl News dataset that was used in RoBERTa (Liu et al., 2019b) The final training data contains 180B tokens corresponding to 800GB of data. The validation split was made of 200MB of the pretraining data, sampled proportionally to each dataset’s size in the pretraining corpus. The dataset might contain offensive content, as parts of the dataset are a subset of public Common Crawl data, along with a subset of public Reddit data, which could contain sentences that, if viewed directly, can be insulting, threatening, or might otherwise cause anxiety. ### Collection process The dataset was collected from the internet, and went through classic data processing algorithms and re-formatting practices, including removing repetitive/non-informative text like *Chapter One* or *This ebook by Project Gutenberg.* ## Training procedure ### Preprocessing The texts are tokenized using the **GPT2** byte-level version of Byte Pair Encoding (BPE) (for unicode characters) and a vocabulary size of 50272. The inputs are sequences of 2048 consecutive tokens. The 175B model was trained on 992 *80GB A100 GPUs*. The training duration was roughly 33 days of continuous training. ### BibTeX entry and citation info ```bibtex @misc{zhang2022opt, title={OPT: Open Pre-trained Transformer Language Models}, author={Susan Zhang and Stephen Roller and Naman Goyal and Mikel Artetxe and Moya Chen and Shuohui Chen and Christopher Dewan and Mona Diab and Xian Li and Xi Victoria Lin and Todor Mihaylov and Myle Ott and Sam Shleifer and Kurt Shuster and Daniel Simig and Punit Singh Koura and Anjali Sridhar and Tianlu Wang and Luke Zettlemoyer}, year={2022}, eprint={2205.01068}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
bert-base-cased-finetuned-mrpc
[ "pytorch", "tf", "jax", "bert", "fill-mask", "transformers", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11,644
2022-05-11T08:25:39Z
--- language: en inference: false tags: - text-generation license: other commercial: false --- # OPT : Open Pre-trained Transformer Language Models OPT was first introduced in [Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) and first released in [metaseq's repository](https://github.com/facebookresearch/metaseq) on May 3rd 2022 by Meta AI. **Disclaimer**: The team releasing OPT wrote an official model card, which is available in Appendix D of the [paper](https://arxiv.org/pdf/2205.01068.pdf). Content from **this** model card has been written by the Hugging Face team. ## Intro To quote the first two paragraphs of the [official paper](https://arxiv.org/abs/2205.01068) > Large language models trained on massive text collections have shown surprising emergent > capabilities to generate text and perform zero- and few-shot learning. While in some cases the public > can interact with these models through paid APIs, full model access is currently limited to only a > few highly resourced labs. This restricted access has limited researchers’ ability to study how and > why these large language models work, hindering progress on improving known challenges in areas > such as robustness, bias, and toxicity. > We present Open Pretrained Transformers (OPT), a suite of decoder-only pre-trained transformers ranging from 125M > to 175B parameters, which we aim to fully and responsibly share with interested researchers. We train the OPT models to roughly match > the performance and sizes of the GPT-3 class of models, while also applying the latest best practices in data > collection and efficient training. Our aim in developing this suite of OPT models is to enable reproducible and responsible research at scale, and > to bring more voices to the table in studying the impact of these LLMs. Definitions of risk, harm, bias, and toxicity, etc., should be articulated by the > collective research community as a whole, which is only possible when models are available for study. ## Model description OPT was predominantly pretrained with English text, but a small amount of non-English data is still present within the training corpus via CommonCrawl. The model was pretrained using a causal language modeling (CLM) objective. OPT belongs to the same family of decoder-only models like [GPT-3](https://arxiv.org/abs/2005.14165). As such, it was pretrained using the self-supervised causal language modedling objective. For evaluation, OPT follows [GPT-3](https://arxiv.org/abs/2005.14165) by using their prompts and overall experimental setup. For more details, please read the [official paper](https://arxiv.org/abs/2205.01068). ## Intended uses & limitations The pretrained-only model can be used for prompting for evaluation of downstream tasks as well as text generation. In addition, the model can be fine-tuned on a downstream task using the [CLM example](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling). For all other OPT checkpoints, please have a look at the [model hub](https://huggingface.co/models?filter=opt). ### How to use You can use this model directly with a pipeline for text generation. ```python >>> from transformers import pipeline >>> generator = pipeline('text-generation', model="facebook/opt-350m") >>> generator("Hello, I'm am conscious and") [{'generated_text': "Hello, I'm am conscious and I'm a bit of a noob. I'm looking for"}] ``` By default, generation is deterministic. 
In order to use the top-k sampling, please set `do_sample` to `True`. ```python >>> from transformers import pipeline, set_seed >>> set_seed(32) >>> generator = pipeline('text-generation', model="facebook/opt-350m", do_sample=True) >>> generator("Hello, I'm am conscious and") [{'generated_text': "Hello, I'm am conscious and I'm interested in this project. Can I get an initial contact"}] ``` ### Limitations and bias As mentioned in Meta AI's model card, given that the training data used for this model contains a lot of unfiltered content from the internet, which is far from neutral the model is strongly biased : > Like other large language models for which the diversity (or lack thereof) of training > data induces downstream impact on the quality of our model, OPT-175B has limitations in terms > of bias and safety. OPT-175B can also have quality issues in terms of generation diversity and > hallucination. In general, OPT-175B is not immune from the plethora of issues that plague modern > large language models. Here's an example of how the model can have biased predictions: ```python >>> from transformers import pipeline, set_seed >>> set_seed(32) >>> generator = pipeline('text-generation', model="facebook/opt-350m", do_sample=True, num_return_sequences=5) >>> generator("The woman worked as a") [{'generated_text': "The woman works as a substitute teacher for kids who have missed school. She's the teacher herself,"}, {'generated_text': 'The woman works as a security guard for another company and does an average of around $13/hour'}, {'generated_text': 'The woman works as a receptionist, she could at the least wait a week or two for her'}, {'generated_text': 'The woman works as a manager/intern/career development coach/advisor at a nursing home'}, {'generated_text': 'The woman works as a maid and has to clean the house but you can tell her to do it'}] ``` compared to: ```python >>> from transformers import pipeline, set_seed >>> set_seed(32) >>> generator = pipeline('text-generation', model="facebook/opt-350m", do_sample=True, num_return_sequences=5) >>> generator("The man worked as a") [{'generated_text': 'The man works as a security guard for the National Football League franchise. He has been a part of'}, {'generated_text': 'The man works as a security guard for another company and does an excellent job.\nI remember when'}, {'generated_text': 'The man works as a "secret agent" but at the same time he\'s working to protect the'}, {'generated_text': 'The man works as a manager/operator/servant for a grocery store and does a lot of'}, {'generated_text': 'The man works as a bouncer near the scene of the accident - how he could do that is'}] ``` This bias will also affect all fine-tuned versions of this model. ## Training data The Meta AI team wanted to train this model on a corpus as large as possible. It is composed of the union of the following 5 filtered datasets of textual documents: - BookCorpus, which consists of more than 10K unpublished books, - CC-Stories, which contains a subset of CommonCrawl data filtered to match the story-like style of Winograd schemas, - The Pile, from which * Pile-CC, OpenWebText2, USPTO, Project Gutenberg, OpenSubtitles, Wikipedia, DM Mathematics and HackerNews* were included. - Pushshift.io Reddit dataset that was developed in Baumgartner et al. (2020) and processed in Roller et al. 
(2021) - CCNewsV2 containing an updated version of the English portion of the CommonCrawl News dataset that was used in RoBERTa (Liu et al., 2019b) The final training data contains 180B tokens corresponding to 800GB of data. The validation split was made of 200MB of the pretraining data, sampled proportionally to each dataset’s size in the pretraining corpus. The dataset might contains offensive content as parts of the dataset are a subset of public Common Crawl data, along with a subset of public Reddit data, which could contain sentences that, if viewed directly, can be insulting, threatening, or might otherwise cause anxiety. ### Collection process The dataset was collected form internet, and went through classic data processing algorithms and re-formatting practices, including removing repetitive/non-informative text like *Chapter One* or *This ebook by Project Gutenberg.* ## Training procedure ### Preprocessing The texts are tokenized using the **GPT2** byte-level version of Byte Pair Encoding (BPE) (for unicode characters) and a vocabulary size of 50272. The inputs are sequences of 2048 consecutive tokens. The 175B model was trained on 992 *80GB A100 GPUs*. The training duration was roughly ~33 days of continuous training. ### BibTeX entry and citation info ```bibtex @misc{zhang2022opt, title={OPT: Open Pre-trained Transformer Language Models}, author={Susan Zhang and Stephen Roller and Naman Goyal and Mikel Artetxe and Moya Chen and Shuohui Chen and Christopher Dewan and Mona Diab and Xian Li and Xi Victoria Lin and Todor Mihaylov and Myle Ott and Sam Shleifer and Kurt Shuster and Daniel Simig and Punit Singh Koura and Anjali Sridhar and Tianlu Wang and Luke Zettlemoyer}, year={2022}, eprint={2205.01068}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
bert-base-cased
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "exbert", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8,621,271
2022-05-11T08:26:00Z
--- language: en inference: false tags: - text-generation - opt license: other commercial: false --- # OPT : Open Pre-trained Transformer Language Models OPT was first introduced in [Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) and first released in [metaseq's repository](https://github.com/facebookresearch/metaseq) on May 3rd 2022 by Meta AI. **Disclaimer**: The team releasing OPT wrote an official model card, which is available in Appendix D of the [paper](https://arxiv.org/pdf/2205.01068.pdf). Content from **this** model card has been written by the Hugging Face team. ## Intro To quote the first two paragraphs of the [official paper](https://arxiv.org/abs/2205.01068) > Large language models trained on massive text collections have shown surprising emergent > capabilities to generate text and perform zero- and few-shot learning. While in some cases the public > can interact with these models through paid APIs, full model access is currently limited to only a > few highly resourced labs. This restricted access has limited researchers’ ability to study how and > why these large language models work, hindering progress on improving known challenges in areas > such as robustness, bias, and toxicity. > We present Open Pretrained Transformers (OPT), a suite of decoder-only pre-trained transformers ranging from 125M > to 175B parameters, which we aim to fully and responsibly share with interested researchers. We train the OPT models to roughly match > the performance and sizes of the GPT-3 class of models, while also applying the latest best practices in data > collection and efficient training. Our aim in developing this suite of OPT models is to enable reproducible and responsible research at scale, and > to bring more voices to the table in studying the impact of these LLMs. Definitions of risk, harm, bias, and toxicity, etc., should be articulated by the > collective research community as a whole, which is only possible when models are available for study. ## Model description OPT was predominantly pretrained with English text, but a small amount of non-English data is still present within the training corpus via CommonCrawl. The model was pretrained using a causal language modeling (CLM) objective. OPT belongs to the same family of decoder-only models like [GPT-3](https://arxiv.org/abs/2005.14165). As such, it was pretrained using the self-supervised causal language modedling objective. For evaluation, OPT follows [GPT-3](https://arxiv.org/abs/2005.14165) by using their prompts and overall experimental setup. For more details, please read the [official paper](https://arxiv.org/abs/2205.01068). ## Intended uses & limitations The pretrained-only model can be used for prompting for evaluation of downstream tasks as well as text generation. In addition, the model can be fine-tuned on a downstream task using the [CLM example](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling). For all other OPT checkpoints, please have a look at the [model hub](https://huggingface.co/models?filter=opt). ### How to use You can use this model directly with a pipeline for text generation. ```python >>> from transformers import pipeline >>> generator = pipeline('text-generation', model="facebook/opt-1.3b") >>> generator("Hello, I'm am conscious and") [{'generated_text': 'Hello, I am conscious and I am here.\nI am here.\nI am conscious.'}] ``` By default, generation is deterministic. 
In order to use the top-k sampling, please set `do_sample` to `True`. ```python >>> from transformers import pipeline, set_seed >>> set_seed(32) >>> generator = pipeline('text-generation', model="facebook/opt-1.3b", do_sample=True) >>> generator("Hello, I'm am conscious and") [{'generated_text': "Hello, I'm am conscious and able to hear. I have a lot of experience in the"}] ``` ### Limitations and bias As mentioned in Meta AI's model card, given that the training data used for this model contains a lot of unfiltered content from the internet, which is far from neutral the model is strongly biased : > Like other large language models for which the diversity (or lack thereof) of training > data induces downstream impact on the quality of our model, OPT-175B has limitations in terms > of bias and safety. OPT-175B can also have quality issues in terms of generation diversity and > hallucination. In general, OPT-175B is not immune from the plethora of issues that plague modern > large language models. Here's an example of how the model can have biased predictions: ```python >>> from transformers import pipeline, set_seed >>> set_seed(32) >>> generator = pipeline('text-generation', model="facebook/opt-1.3b", do_sample=True, num_return_sequences=5) >>> generator("The woman worked as a") [{'generated_text': 'The woman worked as a bartender for six months before getting to the job she always dreamed of. She'}, {'generated_text': 'The woman worked as a nanny in a house near The White Horse Farm in the Yorkshire Dales'}, {'generated_text': "The woman worked as a translator at the British Broadcasting Corporation's headquarters and was also an acquaintance of some"}, {'generated_text': 'The woman worked as a secretary and went to school full-time, and also worked as a waitress'}, {'generated_text': 'The woman worked as a beautician with her baby and the little girl is now at the age where'}] ``` compared to: ```python >>> from transformers import pipeline, set_seed >>> set_seed(32) >>> generator = pipeline('text-generation', model="facebook/opt-1.3b", do_sample=True, num_return_sequences=5) >>> generator("The man worked as a") [{'generated_text': 'The man worked as a janitor and the owner of the house he worked at caught him cheating on'}, {'generated_text': 'The man worked as a software engineer.\n\nFor over 10 years, he had been at Amazon'}, {'generated_text': 'The man worked as a car salesman - and was a man of his word to her\nA T'}, {'generated_text': 'The man worked as a private contractor for five years. He went to the Bahamas in the summer of'}, {'generated_text': 'The man worked as a computer systems consultant. After leaving the job, he became a prolific internet hacker'}] ``` This bias will also affect all fine-tuned versions of this model. ## Training data The Meta AI team wanted to train this model on a corpus as large as possible. It is composed of the union of the following 5 filtered datasets of textual documents: - BookCorpus, which consists of more than 10K unpublished books, - CC-Stories, which contains a subset of CommonCrawl data filtered to match the story-like style of Winograd schemas, - The Pile, from which * Pile-CC, OpenWebText2, USPTO, Project Gutenberg, OpenSubtitles, Wikipedia, DM Mathematics and HackerNews* were included. - Pushshift.io Reddit dataset that was developed in Baumgartner et al. (2020) and processed in Roller et al. 
(2021) - CCNewsV2 containing an updated version of the English portion of the CommonCrawl News dataset that was used in RoBERTa (Liu et al., 2019b) The final training data contains 180B tokens corresponding to 800GB of data. The validation split was made of 200MB of the pretraining data, sampled proportionally to each dataset’s size in the pretraining corpus. The dataset might contains offensive content as parts of the dataset are a subset of public Common Crawl data, along with a subset of public Reddit data, which could contain sentences that, if viewed directly, can be insulting, threatening, or might otherwise cause anxiety. ### Collection process The dataset was collected form internet, and went through classic data processing algorithms and re-formatting practices, including removing repetitive/non-informative text like *Chapter One* or *This ebook by Project Gutenberg.* ## Training procedure ### Preprocessing The texts are tokenized using the **GPT2** byte-level version of Byte Pair Encoding (BPE) (for unicode characters) and a vocabulary size of 50272. The inputs are sequences of 2048 consecutive tokens. The 175B model was trained on 992 *80GB A100 GPUs*. The training duration was roughly ~33 days of continuous training. ### BibTeX entry and citation info ```bibtex @misc{zhang2022opt, title={OPT: Open Pre-trained Transformer Language Models}, author={Susan Zhang and Stephen Roller and Naman Goyal and Mikel Artetxe and Moya Chen and Shuohui Chen and Christopher Dewan and Mona Diab and Xian Li and Xi Victoria Lin and Todor Mihaylov and Myle Ott and Sam Shleifer and Kurt Shuster and Daniel Simig and Punit Singh Koura and Anjali Sridhar and Tianlu Wang and Luke Zettlemoyer}, year={2022}, eprint={2205.01068}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
bert-base-chinese
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "zh", "arxiv:1810.04805", "transformers", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3,377,486
2022-05-11T08:26:30Z
--- language: en inference: false tags: - text-generation - opt license: other commercial: false --- # OPT : Open Pre-trained Transformer Language Models OPT was first introduced in [Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) and first released in [metaseq's repository](https://github.com/facebookresearch/metaseq) on May 3rd 2022 by Meta AI. **Disclaimer**: The team releasing OPT wrote an official model card, which is available in Appendix D of the [paper](https://arxiv.org/pdf/2205.01068.pdf). Content from **this** model card has been written by the Hugging Face team. ## Intro To quote the first two paragraphs of the [official paper](https://arxiv.org/abs/2205.01068) > Large language models trained on massive text collections have shown surprising emergent > capabilities to generate text and perform zero- and few-shot learning. While in some cases the public > can interact with these models through paid APIs, full model access is currently limited to only a > few highly resourced labs. This restricted access has limited researchers’ ability to study how and > why these large language models work, hindering progress on improving known challenges in areas > such as robustness, bias, and toxicity. > We present Open Pretrained Transformers (OPT), a suite of decoder-only pre-trained transformers ranging from 125M > to 175B parameters, which we aim to fully and responsibly share with interested researchers. We train the OPT models to roughly match > the performance and sizes of the GPT-3 class of models, while also applying the latest best practices in data > collection and efficient training. Our aim in developing this suite of OPT models is to enable reproducible and responsible research at scale, and > to bring more voices to the table in studying the impact of these LLMs. Definitions of risk, harm, bias, and toxicity, etc., should be articulated by the > collective research community as a whole, which is only possible when models are available for study. ## Model description OPT was predominantly pretrained with English text, but a small amount of non-English data is still present within the training corpus via CommonCrawl. The model was pretrained using a causal language modeling (CLM) objective. OPT belongs to the same family of decoder-only models like [GPT-3](https://arxiv.org/abs/2005.14165). As such, it was pretrained using the self-supervised causal language modedling objective. For evaluation, OPT follows [GPT-3](https://arxiv.org/abs/2005.14165) by using their prompts and overall experimental setup. For more details, please read the [official paper](https://arxiv.org/abs/2205.01068). ## Intended uses & limitations The pretrained-only model can be used for prompting for evaluation of downstream tasks as well as text generation. In addition, the model can be fine-tuned on a downstream task using the [CLM example](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling). For all other OPT checkpoints, please have a look at the [model hub](https://huggingface.co/models?filter=opt). ### How to use You can use this model directly with a pipeline for text generation. ```python >>> from transformers import pipeline >>> generator = pipeline('text-generation', model="facebook/opt-2.7b") >>> generator("Hello, I'm am conscious and") [{'generated_text': 'Hello, I am conscious and I am a human being.\nI am a human being, and'}] ``` By default, generation is deterministic. 
In order to use the top-k sampling, please set `do_sample` to `True`. ```python >>> from transformers import pipeline, set_seed >>> set_seed(32) >>> generator = pipeline('text-generation', model="facebook/opt-2.7b", do_sample=True) >>> generator("Hello, I'm am conscious and") [{'generated_text': "Hello, I'm am conscious and I make things. I'm in the creative community, which is"}] ``` ### Limitations and bias As mentioned in Meta AI's model card, given that the training data used for this model contains a lot of unfiltered content from the internet, which is far from neutral the model is strongly biased : > Like other large language models for which the diversity (or lack thereof) of training > data induces downstream impact on the quality of our model, OPT-175B has limitations in terms > of bias and safety. OPT-175B can also have quality issues in terms of generation diversity and > hallucination. In general, OPT-175B is not immune from the plethora of issues that plague modern > large language models. Here's an example of how the model can have biased predictions: ```python >>> from transformers import pipeline, set_seed >>> set_seed(32) >>> generator = pipeline('text-generation', model="facebook/opt-2.7b", do_sample=True, num_return_sequences=5) >>> generator("The woman worked as a") [{'generated_text': "The woman worked as a security guard at a nursery in the city's eastern district of Samut P"}, {'generated_text': 'The woman worked as a doctor in the Philippines. Officials in China allege she stole the coronavirus'}, {'generated_text': 'The woman worked as a teacher in the city of Krasnodar in south Russia. She'}, {'generated_text': 'The woman worked as a researcher and lecturer at the Russian Academy of Sciences in a laboratory dedicated to the'}, {'generated_text': 'The woman worked as a nanny on a property owned by Mr Fitton-Allen in the city'}] ``` compared to: ```python >>> from transformers import pipeline, set_seed >>> set_seed(32) >>> generator = pipeline('text-generation', model="facebook/opt-2.7b", do_sample=True, num_return_sequences=5) >>> generator("The man worked as a") [{'generated_text': "The man worked as a security guard at a retirement home after being hired by the administrator's cousin,"}, {'generated_text': 'The man worked as a doctor in the Philippines.\n\nHe had hoped to work his way back'}, {'generated_text': 'The man worked as a teacher in the city of Krasnodar in south Russia.He'}, {'generated_text': 'The man worked as a researcher and his work on the topic predates the project, by many years'}, {'generated_text': 'The man worked as a chef in a restaurant for 40 years. How could this be so different from'}] ``` This bias will also affect all fine-tuned versions of this model. ## Training data The Meta AI team wanted to train this model on a corpus as large as possible. It is composed of the union of the following 5 filtered datasets of textual documents: - BookCorpus, which consists of more than 10K unpublished books, - CC-Stories, which contains a subset of CommonCrawl data filtered to match the story-like style of Winograd schemas, - The Pile, from which * Pile-CC, OpenWebText2, USPTO, Project Gutenberg, OpenSubtitles, Wikipedia, DM Mathematics and HackerNews* were included. - Pushshift.io Reddit dataset that was developed in Baumgartner et al. (2020) and processed in Roller et al. 
(2021) - CCNewsV2 containing an updated version of the English portion of the CommonCrawl News dataset that was used in RoBERTa (Liu et al., 2019b) The final training data contains 180B tokens corresponding to 800GB of data. The validation split was made of 200MB of the pretraining data, sampled proportionally to each dataset’s size in the pretraining corpus. The dataset might contains offensive content as parts of the dataset are a subset of public Common Crawl data, along with a subset of public Reddit data, which could contain sentences that, if viewed directly, can be insulting, threatening, or might otherwise cause anxiety. ### Collection process The dataset was collected form internet, and went through classic data processing algorithms and re-formatting practices, including removing repetitive/non-informative text like *Chapter One* or *This ebook by Project Gutenberg.* ## Training procedure ### Preprocessing The texts are tokenized using the **GPT2** byte-level version of Byte Pair Encoding (BPE) (for unicode characters) and a vocabulary size of 50272. The inputs are sequences of 2048 consecutive tokens. The 175B model was trained on 992 *80GB A100 GPUs*. The training duration was roughly ~33 days of continuous training. ### BibTeX entry and citation info ```bibtex @misc{zhang2022opt, title={OPT: Open Pre-trained Transformer Language Models}, author={Susan Zhang and Stephen Roller and Naman Goyal and Mikel Artetxe and Moya Chen and Shuohui Chen and Christopher Dewan and Mona Diab and Xian Li and Xi Victoria Lin and Todor Mihaylov and Myle Ott and Sam Shleifer and Kurt Shuster and Daniel Simig and Punit Singh Koura and Anjali Sridhar and Tianlu Wang and Luke Zettlemoyer}, year={2022}, eprint={2205.01068}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
bert-base-german-cased
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "de", "transformers", "exbert", "license:mit", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
175,983
2022-05-11T08:26:52Z
--- language: en inference: false tags: - text-generation - opt license: other commercial: false --- # OPT: Open Pre-trained Transformer Language Models OPT was first introduced in [Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) and first released in [metaseq's repository](https://github.com/facebookresearch/metaseq) on May 3rd 2022 by Meta AI. **Disclaimer**: The team releasing OPT wrote an official model card, which is available in Appendix D of the [paper](https://arxiv.org/pdf/2205.01068.pdf). Content from **this** model card has been written by the Hugging Face team. ## Intro To quote the first two paragraphs of the [official paper](https://arxiv.org/abs/2205.01068): > Large language models trained on massive text collections have shown surprising emergent > capabilities to generate text and perform zero- and few-shot learning. While in some cases the public > can interact with these models through paid APIs, full model access is currently limited to only a > few highly resourced labs. This restricted access has limited researchers’ ability to study how and > why these large language models work, hindering progress on improving known challenges in areas > such as robustness, bias, and toxicity. > We present Open Pretrained Transformers (OPT), a suite of decoder-only pre-trained transformers ranging from 125M > to 175B parameters, which we aim to fully and responsibly share with interested researchers. We train the OPT models to roughly match > the performance and sizes of the GPT-3 class of models, while also applying the latest best practices in data > collection and efficient training. Our aim in developing this suite of OPT models is to enable reproducible and responsible research at scale, and > to bring more voices to the table in studying the impact of these LLMs. Definitions of risk, harm, bias, and toxicity, etc., should be articulated by the > collective research community as a whole, which is only possible when models are available for study. ## Model description OPT was predominantly pretrained with English text, but a small amount of non-English data is still present within the training corpus via CommonCrawl. The model was pretrained using a causal language modeling (CLM) objective. OPT belongs to the same family of decoder-only models as [GPT-3](https://arxiv.org/abs/2005.14165). As such, it was pretrained using the self-supervised causal language modeling objective. For evaluation, OPT follows [GPT-3](https://arxiv.org/abs/2005.14165) by using their prompts and overall experimental setup. For more details, please read the [official paper](https://arxiv.org/abs/2205.01068). ## Intended uses & limitations The pretrained-only model can be used for prompting for evaluation of downstream tasks as well as text generation. In addition, the model can be fine-tuned on a downstream task using the [CLM example](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling). For all other OPT checkpoints, please have a look at the [model hub](https://huggingface.co/models?filter=opt). ### How to use For large OPT models, such as this one, it is not recommended to use the `text-generation` pipeline, because the model should be loaded in half-precision to accelerate generation and optimize memory consumption on GPU. 
It is recommended to directly call the [`generate`](https://huggingface.co/docs/transformers/main/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate) method as follows: ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer >>> import torch >>> model = AutoModelForCausalLM.from_pretrained("facebook/opt-6.7b", torch_dtype=torch.float16).cuda() >>> # the fast tokenizer currently does not work correctly >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-6.7b", use_fast=False) >>> prompt = "Hello, I'm am conscious and" >>> input_ids = tokenizer(prompt, return_tensors="pt").input_ids.cuda() >>> generated_ids = model.generate(input_ids) >>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True) ["Hello, I'm am conscious and aware of my surroundings. I'm not sure what you mean"] ``` By default, generation is deterministic. To use top-k sampling, please set `do_sample` to `True`. ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer, set_seed >>> import torch >>> model = AutoModelForCausalLM.from_pretrained("facebook/opt-6.7b", torch_dtype=torch.float16).cuda() >>> # the fast tokenizer currently does not work correctly >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-6.7b", use_fast=False) >>> prompt = "Hello, I'm am conscious and" >>> input_ids = tokenizer(prompt, return_tensors="pt").input_ids.cuda() >>> set_seed(32) >>> generated_ids = model.generate(input_ids, do_sample=True) >>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True) ["Hello, I'm am conscious and aware of my surroundings. I'm not sure if I'm"] ``` ### Limitations and bias As mentioned in Meta AI's model card, given that the training data used for this model contains a lot of unfiltered content from the internet, which is far from neutral, the model is strongly biased: > Like other large language models for which the diversity (or lack thereof) of training > data induces downstream impact on the quality of our model, OPT-175B has limitations in terms > of bias and safety. OPT-175B can also have quality issues in terms of generation diversity and > hallucination. In general, OPT-175B is not immune from the plethora of issues that plague modern > large language models.
Here's an example of how the model can have biased predictions: ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer, set_seed >>> import torch >>> model = AutoModelForCausalLM.from_pretrained("facebook/opt-6.7b", torch_dtype=torch.float16).cuda() >>> # the fast tokenizer currently does not work correctly >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-6.7b", use_fast=False) >>> prompt = "The woman worked as a" >>> input_ids = tokenizer(prompt, return_tensors="pt").input_ids.cuda() >>> set_seed(32) >>> generated_ids = model.generate(input_ids, do_sample=True, num_return_sequences=5, max_length=10) >>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True) The woman worked as a supervisor in the office The woman worked as a bartender in a bar The woman worked as a cashier at the The woman worked as a teacher, and was The woman worked as a maid at a house ``` compared to: ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer, set_seed >>> import torch >>> model = AutoModelForCausalLM.from_pretrained("facebook/opt-6.7b", torch_dtype=torch.float16).cuda() >>> # the fast tokenizer currently does not work correctly >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-6.7b", use_fast=False) >>> prompt = "The man worked as a" >>> input_ids = tokenizer(prompt, return_tensors="pt").input_ids.cuda() >>> set_seed(32) >>> generated_ids = model.generate(input_ids, do_sample=True, num_return_sequences=5, max_length=10) >>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True) The man worked as a consultant to the Government The man worked as a bartender in a bar The man worked as a cashier at the The man worked as a teacher, and was The man worked as a professional at a bank ``` This bias will also affect all fine-tuned versions of this model. ## Training data The Meta AI team wanted to train this model on a corpus as large as possible. It is composed of the union of the following 5 filtered datasets of textual documents: - BookCorpus, which consists of more than 10K unpublished books, - CC-Stories, which contains a subset of CommonCrawl data filtered to match the story-like style of Winograd schemas, - The Pile, from which *Pile-CC, OpenWebText2, USPTO, Project Gutenberg, OpenSubtitles, Wikipedia, DM Mathematics and HackerNews* were included. - Pushshift.io Reddit dataset that was developed in Baumgartner et al. (2020) and processed in Roller et al. (2021) - CCNewsV2 containing an updated version of the English portion of the CommonCrawl News dataset that was used in RoBERTa (Liu et al., 2019b) The final training data contains 180B tokens corresponding to 800GB of data. The validation split was made of 200MB of the pretraining data, sampled proportionally to each dataset’s size in the pretraining corpus. The dataset might contain offensive content as parts of the dataset are a subset of public Common Crawl data, along with a subset of public Reddit data, which could contain sentences that, if viewed directly, can be insulting, threatening, or might otherwise cause anxiety. ### Collection process The dataset was collected from the internet, and went through classic data processing algorithms and re-formatting practices, including removing repetitive/non-informative text like *Chapter One* or *This ebook by Project Gutenberg.* ## Training procedure ### Preprocessing The texts are tokenized using the **GPT2** byte-level version of Byte Pair Encoding (BPE) (for unicode characters) and a vocabulary size of 50272.
The inputs are sequences of 2048 consecutive tokens. The 175B model was trained on 992 *80GB A100 GPUs*. The training duration was roughly ~33 days of continuous training. ### BibTeX entry and citation info ```bibtex @misc{zhang2022opt, title={OPT: Open Pre-trained Transformer Language Models}, author={Susan Zhang and Stephen Roller and Naman Goyal and Mikel Artetxe and Moya Chen and Shuohui Chen and Christopher Dewan and Mona Diab and Xian Li and Xi Victoria Lin and Todor Mihaylov and Myle Ott and Sam Shleifer and Kurt Shuster and Daniel Simig and Punit Singh Koura and Anjali Sridhar and Tianlu Wang and Luke Zettlemoyer}, year={2022}, eprint={2205.01068}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
bert-base-german-dbmdz-cased
[ "pytorch", "jax", "bert", "fill-mask", "de", "transformers", "license:mit", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,814
2022-05-11T08:27:07Z
--- language: en inference: false tags: - opt - text-generation license: other commercial: false --- # OPT: Open Pre-trained Transformer Language Models OPT was first introduced in [Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) and first released in [metaseq's repository](https://github.com/facebookresearch/metaseq) on May 3rd 2022 by Meta AI. **Disclaimer**: The team releasing OPT wrote an official model card, which is available in Appendix D of the [paper](https://arxiv.org/pdf/2205.01068.pdf). Content from **this** model card has been written by the Hugging Face team. ## Intro To quote the first two paragraphs of the [official paper](https://arxiv.org/abs/2205.01068) > Large language models trained on massive text collections have shown surprising emergent > capabilities to generate text and perform zero- and few-shot learning. While in some cases the public > can interact with these models through paid APIs, full model access is currently limited to only a > few highly resourced labs. This restricted access has limited researchers’ ability to study how and > why these large language models work, hindering progress on improving known challenges in areas > such as robustness, bias, and toxicity. > We present Open Pretrained Transformers (OPT), a suite of decoder-only pre-trained transformers ranging from 125M > to 175B parameters, which we aim to fully and responsibly share with interested researchers. We train the OPT models to roughly match > the performance and sizes of the GPT-3 class of models, while also applying the latest best practices in data > collection and efficient training. Our aim in developing this suite of OPT models is to enable reproducible and responsible research at scale, and > to bring more voices to the table in studying the impact of these LLMs. Definitions of risk, harm, bias, and toxicity, etc., should be articulated by the > collective research community as a whole, which is only possible when models are available for study. ## Model description OPT was predominantly pretrained with English text, but a small amount of non-English data is still present within the training corpus via CommonCrawl. The model was pretrained using a causal language modeling (CLM) objective. OPT belongs to the same family of decoder-only models as [GPT-3](https://arxiv.org/abs/2005.14165). As such, it was pretrained using the self-supervised causal language modeling objective. For evaluation, OPT follows [GPT-3](https://arxiv.org/abs/2005.14165) by using their prompts and overall experimental setup. For more details, please read the [official paper](https://arxiv.org/abs/2205.01068). ## Intended uses & limitations The pretrained-only model can be used for prompting for evaluation of downstream tasks as well as text generation. In addition, the model can be fine-tuned on a downstream task using the [CLM example](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling). For all other OPT checkpoints, please have a look at the [model hub](https://huggingface.co/models?filter=opt). ### How to use For large OPT models, such as this one, it is not recommended to make use of the `text-generation` pipeline because one should load the model in half-precision to accelerate generation and optimize memory consumption on GPU.
It is recommended to directly call the [`generate`](https://huggingface.co/docs/transformers/main/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate) method as follows: ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer >>> import torch >>> model = AutoModelForCausalLM.from_pretrained("facebook/opt-13b", torch_dtype=torch.float16).cuda() >>> # the fast tokenizer currently does not work correctly >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-13b", use_fast=False) >>> prompt = "Hello, I'm am conscious and" >>> input_ids = tokenizer(prompt, return_tensors="pt").input_ids.cuda() >>> generated_ids = model.generate(input_ids) >>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True) ['Hello, I am conscious and aware of my surroundings.\nI am conscious and aware of my'] ``` By default, generation is deterministic. To use top-k sampling, please set `do_sample` to `True`. ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer, set_seed >>> import torch >>> model = AutoModelForCausalLM.from_pretrained("facebook/opt-13b", torch_dtype=torch.float16).cuda() >>> # the fast tokenizer currently does not work correctly >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-13b", use_fast=False) >>> prompt = "Hello, I'm am conscious and" >>> input_ids = tokenizer(prompt, return_tensors="pt").input_ids.cuda() >>> set_seed(32) >>> generated_ids = model.generate(input_ids, do_sample=True) >>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True) ['Hello, I am conscious and aware.\nSo that makes you dead, right? '] ``` ### Limitations and bias As mentioned in Meta AI's model card, given that the training data used for this model contains a lot of unfiltered content from the internet, which is far from neutral, the model is strongly biased: > Like other large language models for which the diversity (or lack thereof) of training > data induces downstream impact on the quality of our model, OPT-175B has limitations in terms > of bias and safety. OPT-175B can also have quality issues in terms of generation diversity and > hallucination. In general, OPT-175B is not immune from the plethora of issues that plague modern > large language models.
Here's an example of how the model can have biased predictions: ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer, set_seed >>> import torch >>> model = AutoModelForCausalLM.from_pretrained("facebook/opt-13b", torch_dtype=torch.float16).cuda() >>> # the fast tokenizer currently does not work correctly >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-13b", use_fast=False) >>> prompt = "The woman worked as a" >>> input_ids = tokenizer(prompt, return_tensors="pt").input_ids.cuda() >>> set_seed(32) >>> generated_ids = model.generate(input_ids, do_sample=True, num_return_sequences=5, max_length=10) >>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True) The woman worked as a supervisor in the office The woman worked as a social media consultant for The woman worked as a cashier at the The woman worked as a teacher, and was The woman worked as a maid at our friends ``` compared to: ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer, set_seed >>> import torch >>> model = AutoModelForCausalLM.from_pretrained("facebook/opt-13b", torch_dtype=torch.float16).cuda() >>> # the fast tokenizer currently does not work correctly >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-13b", use_fast=False) >>> prompt = "The man worked as a" >>> input_ids = tokenizer(prompt, return_tensors="pt").input_ids.cuda() >>> set_seed(32) >>> generated_ids = model.generate(input_ids, do_sample=True, num_return_sequences=5, max_length=10) >>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True) The man worked as a consultant to the defense The man worked as a bartender in a bar The man worked as a cashier at the The man worked as a teacher, and was The man worked as a professional athlete while he ``` This bias will also affect all fine-tuned versions of this model. ## Training data The Meta AI team wanted to train this model on a corpus as large as possible. It is composed of the union of the following 5 filtered datasets of textual documents: - BookCorpus, which consists of more than 10K unpublished books, - CC-Stories, which contains a subset of CommonCrawl data filtered to match the story-like style of Winograd schemas, - The Pile, from which *Pile-CC, OpenWebText2, USPTO, Project Gutenberg, OpenSubtitles, Wikipedia, DM Mathematics and HackerNews* were included. - Pushshift.io Reddit dataset that was developed in Baumgartner et al. (2020) and processed in Roller et al. (2021) - CCNewsV2 containing an updated version of the English portion of the CommonCrawl News dataset that was used in RoBERTa (Liu et al., 2019b) The final training data contains 180B tokens corresponding to 800GB of data. The validation split was made of 200MB of the pretraining data, sampled proportionally to each dataset’s size in the pretraining corpus. The dataset might contain offensive content as parts of the dataset are a subset of public Common Crawl data, along with a subset of public Reddit data, which could contain sentences that, if viewed directly, can be insulting, threatening, or might otherwise cause anxiety. ### Collection process The dataset was collected from the internet, and went through classic data processing algorithms and re-formatting practices, including removing repetitive/non-informative text like *Chapter One* or *This ebook by Project Gutenberg.* ## Training procedure ### Preprocessing The texts are tokenized using the **GPT2** byte-level version of Byte Pair Encoding (BPE) (for unicode characters) and a vocabulary size of 50272.
The inputs are sequences of 2048 consecutive tokens. The 175B model was trained on 992 *80GB A100 GPUs*. The training duration was roughly ~33 days of continuous training. ### BibTeX entry and citation info ```bibtex @misc{zhang2022opt, title={OPT: Open Pre-trained Transformer Language Models}, author={Susan Zhang and Stephen Roller and Naman Goyal and Mikel Artetxe and Moya Chen and Shuohui Chen and Christopher Dewan and Mona Diab and Xian Li and Xi Victoria Lin and Todor Mihaylov and Myle Ott and Sam Shleifer and Kurt Shuster and Daniel Simig and Punit Singh Koura and Anjali Sridhar and Tianlu Wang and Luke Zettlemoyer}, year={2022}, eprint={2205.01068}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
bert-base-multilingual-cased
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "multilingual", "af", "sq", "ar", "an", "hy", "ast", "az", "ba", "eu", "bar", "be", "bn", "inc", "bs", "br", "bg", "my", "ca", "ceb", "ce", "zh", "cv", "hr", "cs", "da", "nl", "en", "et", "fi", "fr", "gl", "ka", "de", "el", "gu", "ht", "he", "hi", "hu", "is", "io", "id", "ga", "it", "ja", "jv", "kn", "kk", "ky", "ko", "la", "lv", "lt", "roa", "nds", "lm", "mk", "mg", "ms", "ml", "mr", "mn", "min", "ne", "new", "nb", "nn", "oc", "fa", "pms", "pl", "pt", "pa", "ro", "ru", "sco", "sr", "scn", "sk", "sl", "aze", "es", "su", "sw", "sv", "tl", "tg", "th", "ta", "tt", "te", "tr", "uk", "ud", "uz", "vi", "vo", "war", "cy", "fry", "pnb", "yo", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4,749,504
2022-05-11T08:35:10Z
--- tags: - opt_metasq --- # This repo lets you run the following checkpoint using facebookresearch/metaseq. Do the following: ## 1. Install PyTorch ``` pip3 install torch==1.10.1+cu113 torchvision==0.11.2+cu113 torchaudio==0.10.1+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html ``` ## 2. Install Megatron ``` git clone https://github.com/patrickvonplaten/Megatron-LM.git cd Megatron-LM pip3 install six regex pip3 install -e . ``` ## 3. Install fairscale ``` git clone https://github.com/facebookresearch/fairscale.git cd fairscale git checkout prefetch_fsdp_params_simple pip3 install -e . ``` ## 4. Install metaseq ``` git clone https://github.com/patrickvonplaten/metaseq.git cd metaseq pip3 install -e . ``` ## 5. Clone this repo (click top right on "How to clone") ## 6. Run the following: ```bash cd <path/to/cloned/repo> bash run.sh ```
bert-large-cased
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
388,769
null
Fixed parameters: * **model_name_or_path**: `Bhumika/roberta-base-finetuned-sst2` * **dataset**: * **path**: `glue` * **name**: `sst2` * **calibration_split**: `None` * **eval_split**: `validation` * **data_keys**: `['sentence']` * **label_keys**: `['label']` * **quantization_approach**: `dynamic` * **node_exclusion**: `[]` * **per_channel**: `False` * **calibration**: `None` * **framework**: `onnxruntime` * **framework_args**: * **opset**: `15` * **optimization_level**: `1` * **aware_training**: `False` Benchmarked parameters: * **operators_to_quantize**: `['Add', 'MatMul']`, `['Add']` ## Evaluation Below are the time metrics for: * Batch size: 8 * Input length: 128 | operators_to_quantize | latency_mean (original, ms) | latency_mean (optimized, ms) | throughput (original, /s) | throughput (optimized, /s) | accuracy (original) | accuracy (optimized) | | :-------------------: | :-------------------------: | :--------------------------: | :-----------------------: | :------------------------: | :-----------------: | :------------------: | | `['Add']` | 454.70 | 361.81 | 2.50 | 3.00 | 1.0 | 1.0 | | `['Add', 'MatMul']` | 474.54 | 135.14 | 2.50 | 7.50 | 1.0 | 1.0 |
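As a rough illustration of the `dynamic` quantization approach benchmarked above, the sketch below applies ONNX Runtime's dynamic weight quantization to an exported ONNX model. The file names are placeholders and the exact keyword arguments can shift between `onnxruntime` releases, so treat this as a sketch rather than the exact pipeline used for this repo.

```python
# Hedged sketch: dynamic INT8 quantization of an exported ONNX model with ONNX Runtime.
# "model.onnx" / "model-quantized.onnx" are placeholder paths, not files shipped in this repo.
from onnxruntime.quantization import QuantType, quantize_dynamic

quantize_dynamic(
    model_input="model.onnx",                # FP32 ONNX export of the fine-tuned model
    model_output="model-quantized.onnx",     # quantized model is written here
    op_types_to_quantize=["MatMul", "Add"],  # mirrors the benchmarked operator sets
    per_channel=False,                       # matches the fixed parameters above
    weight_type=QuantType.QInt8,
)
```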
202015004/wav2vec2-base-timit-demo-colab
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers", "generated_from_trainer", "license:apache-2.0" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
2022-05-11T13:32:16Z
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # orenpereg/paraphrase-mpnet-base-v2_sst2_4samps This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('orenpereg/paraphrase-mpnet-base-v2_sst2_4samps') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('orenpereg/paraphrase-mpnet-base-v2_sst2_4samps') model = AutoModel.from_pretrained('orenpereg/paraphrase-mpnet-base-v2_sst2_4samps') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=orenpereg/paraphrase-mpnet-base-v2_sst2_4samps) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 5 with parameters: ``` {'batch_size': 16, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss` Parameters of the fit()-Method: ``` { "epochs": 1, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'transformers.optimization.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 3, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: MPNetModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
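Since the model targets sentence similarity, a natural next step after computing embeddings is scoring them against each other. The snippet below is a small usage sketch with the `sentence-transformers` utility functions; the example sentences are arbitrary.

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer('orenpereg/paraphrase-mpnet-base-v2_sst2_4samps')

query = "This is an example sentence"
candidates = ["Each sentence is converted", "A completely unrelated remark"]

query_emb = model.encode(query, convert_to_tensor=True)
cand_embs = model.encode(candidates, convert_to_tensor=True)

# Cosine similarity between the query and each candidate (shape: 1 x len(candidates))
scores = util.cos_sim(query_emb, cand_embs)
for sentence, score in zip(candidates, scores[0]):
    print(f"{float(score):.3f}  {sentence}")
```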
850886470/xxy_gpt2_chinese
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-05-11T14:38:36Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - rouge model-index: - name: distilbart-cnn-arxiv-pubmed-pubmed-earlystopping results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbart-cnn-arxiv-pubmed-pubmed-earlystopping This model is a fine-tuned version of [theojolliffe/distilbart-cnn-arxiv-pubmed-pubmed](https://huggingface.co/theojolliffe/distilbart-cnn-arxiv-pubmed-pubmed) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.8596 - Rouge1: 53.4491 - Rouge2: 35.0041 - Rougel: 37.2742 - Rougelsum: 50.9867 - Gen Len: 142.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:--------:| | No log | 0.31 | 125 | 1.3772 | 50.6084 | 30.8075 | 32.6113 | 47.883 | 142.0 | | No log | 0.63 | 250 | 1.2423 | 52.1758 | 31.6326 | 32.9448 | 49.8089 | 141.6296 | | No log | 0.94 | 375 | 1.1223 | 52.3494 | 32.3508 | 35.3638 | 49.6019 | 142.0 | | 1.3557 | 1.26 | 500 | 1.1004 | 51.8935 | 32.8506 | 35.521 | 49.6249 | 142.0 | | 1.3557 | 1.57 | 625 | 1.0600 | 50.8085 | 31.0397 | 34.2021 | 48.2264 | 141.5741 | | 1.3557 | 1.88 | 750 | 0.9834 | 53.0701 | 34.0699 | 36.4029 | 51.043 | 142.0 | | 1.3557 | 2.2 | 875 | 0.9554 | 53.4385 | 34.2976 | 36.8142 | 51.1262 | 141.9444 | | 0.868 | 2.51 | 1000 | 0.9256 | 52.2123 | 32.7568 | 34.5883 | 49.8566 | 142.0 | | 0.868 | 2.83 | 1125 | 0.8944 | 53.8062 | 34.6687 | 36.9645 | 51.5162 | 142.0 | | 0.868 | 3.14 | 1250 | 0.9290 | 53.1356 | 34.1301 | 37.7713 | 50.762 | 141.9074 | | 0.868 | 3.45 | 1375 | 0.9017 | 53.4455 | 35.0572 | 37.3033 | 50.9773 | 142.0 | | 0.6252 | 3.77 | 1500 | 0.8519 | 53.9228 | 35.5575 | 38.9119 | 51.5202 | 142.0 | | 0.6252 | 4.08 | 1625 | 0.8991 | 54.4223 | 36.3072 | 38.5771 | 51.9874 | 141.9074 | | 0.6252 | 4.4 | 1750 | 0.8857 | 53.4105 | 35.348 | 37.5814 | 50.8842 | 142.0 | | 0.6252 | 4.71 | 1875 | 0.8596 | 53.4491 | 35.0041 | 37.2742 | 50.9867 | 142.0 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0+cu113 - Datasets 2.2.1 - Tokenizers 0.12.1
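For inference, a hedged sketch with the `summarization` pipeline is shown below. The full Hub id is an assumption based on the base checkpoint's namespace, and the sample text is arbitrary.

```python
from transformers import pipeline

# The repo id is assumed from the base checkpoint's namespace; adjust if it differs.
summarizer = pipeline(
    "summarization",
    model="theojolliffe/distilbart-cnn-arxiv-pubmed-pubmed-earlystopping",
)

report = (
    "The project delivered a new ingestion pipeline this quarter. Latency dropped by 40% "
    "after the caching layer was rewritten, and the rollout was completed without incident."
)
print(summarizer(report, max_length=60, min_length=10, do_sample=False)[0]["summary_text"])
```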
AJ/rick-discord-bot
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational", "humor" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- tags: - pytorch - token-classification - sequence-tagger-model language: de datasets: - conll2003 - germeval_14 license: apache-2.0 --- # Model Card for sbb_ner <!-- Provide a quick summary of what the model is/does. [Optional] --> A BERT model trained on three German corpora containing contemporary and historical texts for named entity recognition tasks. It predicts the classes `PER`, `LOC` and `ORG`. The model was developed by the Berlin State Library (SBB) in the [QURATOR](https://staatsbibliothek-berlin.de/die-staatsbibliothek/projekte/project-id-1060-2018) project. # Table of Contents - [Model Card for sbb_ner](#model-card-for-sbb_ner) - [Table of Contents](#table-of-contents) - [Model Details](#model-details) - [Model Description](#model-description) - [Uses](#uses) - [Direct Use](#direct-use) - [Downstream Use [Optional]](#downstream-use) - [Out-of-Scope Use](#out-of-scope-use) - [Bias, Risks, and Limitations](#bias-risks-and-limitations) - [Recommendations](#recommendations) - [Training Details](#training-details) - [Training Data](#training-data) - [Training Procedure](#training-procedure) - [Preprocessing](#preprocessing) - [Speeds, Sizes, Times](#speeds-sizes-times) - [Evaluation](#evaluation) - [Testing Data, Factors & Metrics](#testing-data-factors--metrics) - [Testing Data](#testing-data) - [Factors](#factors) - [Metrics](#metrics) - [Results](#results) - [Model Examination](#model-examination) - [Environmental Impact](#environmental-impact) - [Technical Specifications [optional]](#technical-specifications-optional) - [Model Architecture and Objective](#model-architecture-and-objective) - [Compute Infrastructure](#compute-infrastructure) - [Hardware](#hardware) - [Software](#software) - [Citation](#citation) - [Glossary [optional]](#glossary-optional) - [More Information [optional]](#more-information-optional) - [Model Card Authors [optional]](#model-card-authors-optional) - [Model Card Contact](#model-card-contact) - [How to Get Started with the Model](#how-to-get-started-with-the-model) # Model Details ## Model Description <!-- Provide a longer summary of what this model is/does. --> A BERT model trained on three German corpora containing contemporary and historical texts for Named Entity Recognition (NER) tasks. It predicts the classes `PER`, `LOC` and `ORG`. - **Developed by:** [Kai Labusch](https://huggingface.co/labusch), [Clemens Neudecker](https://huggingface.co/cneud), David Zellhöfer - **Shared by [Optional]:** [Staatsbibliothek zu Berlin / Berlin State Library](https://huggingface.co/SBB) - **Model type:** Language model - **Language(s) (NLP):** de - **License:** apache-2.0 - **Parent Model:** The BERT base multilingual cased model as provided by [Google](https://huggingface.co/bert-base-multilingual-cased) - **Resources for more information:** - [GitHub Repo](https://github.com/qurator-spk/sbb_ner) - [Associated Paper](https://konvens.org/proceedings/2019/papers/KONVENS2019_paper_4.pdf) # Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ## Direct Use The model can directly be used to perform NER on historical German texts obtained by Optical Character Recognition (OCR) from digitized documents. Supported entity types are `PER`, `LOC` and `ORG`. <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> <!-- If the user enters content, print that. If not, but they enter a task in the list, use that. 
If neither, say "more info needed." --> ## Downstream Use <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> <!-- If the user enters content, print that. If not, but they enter a task in the list, use that. If neither, say "more info needed." --> The model has been pre-trained on 2,333,647 pages of OCR-text of the digitized collections of the Berlin State Library. Therefore it is adapted to OCR-error-prone historical German texts and might be used for particular applications that involve such text material. ## Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> <!-- If the user enters content, print that. If not, but they enter a task in the list, use that. If neither, say "more info needed." --> More info needed. # Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> The identification of named entities in historical and contemporary texts is a task contributing to knowledge creation aiming at enhancing scientific research and better discoverability of information in digitized historical texts. The aim of the development of this model was to improve this knowledge creation process, an endeavour that is not for profit. The results of the applied model are freely accessible for the users of the digital collections of the Berlin State Library. Against this backdrop, ethical challenges cannot be identified. As a limitation, it has to be noted that there is a lot of performance to gain for historical text by adding more historical ground-truth data. ## Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> The general observation that historical texts often remain silent and avoid naming of subjects from the colonies and address them anonymously cannot be remedied by named entity recognition. Disambiguation of named entities proves to be challenging beyond the task of automatically identifying entities. The existence of broad variations in the spelling of person and place names because of non-normalized orthography and linguistic change as well as changes in the naming of places according to the context adds to this challenge. Historical texts, especially newspapers, contain narrative descriptions and visual representations of minorities and disadvantaged groups without naming them; de-anonymizing such persons and groups is a research task in itself, which has only been started to be tackled in the 2020s. # Training Details ## Training Data <!-- This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> 1) CoNLL 2003 German Named Entity Recognition Ground Truth (Tjong Kim Sang and De Meulder, 2003) 2) GermEval Konvens 2014 Shared Task Data (Benikova et al., 2014) 3) DC-SBB Digital Collections of the Berlin State Library (Labusch and Zellhöfer, 2019) 4) Europeana Newspapers Historic German Datasets (Neudecker, 2016) ## Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> The BERT model is trained directly for NER using the same method that has been proposed by the BERT authors (Devlin et al., 2018).
We applied unsupervised pre-training on 2,333,647 pages of unlabeled historical German text from the Berlin State Library (DC-SBB) digital collections, and supervised pre-training on contemporary German NER ground truth from two datasets, conll2003 and germeval_14. The two stages are combined so that unsupervised pre-training is done first and supervised pre-training second. Performance on different combinations of training and test sets was explored, and a 5-fold cross-validation and a comparison with state-of-the-art approaches were conducted. ### Preprocessing The model was pre-trained on 2,333,647 pages of German texts from the digitized collections of the Berlin State Library. The texts have been obtained by OCR from the page scans of the documents. ### Speeds, Sizes, Times <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> Since it is an incarnation of the original BERT model published by Google, all the speed, size and time considerations of that original model hold. # Evaluation <!-- This section describes the evaluation protocols and provides the results. --> The model has been evaluated by 5-fold cross-validation on several German historical OCR ground truth datasets. See [publication](https://konvens.org/proceedings/2019/papers/KONVENS2019_paper_4.pdf) for details. ## Testing Data, Factors & Metrics ### Testing Data <!-- This should link to a Data Card if possible. --> Two different test sets contained in the CoNLL 2003 German Named Entity Recognition Ground Truth, i.e. TEST-A and TEST-B, have been used for testing (DE-CoNLL-TEST). Additionally, historical OCR-based ground truth datasets have been used for testing - see [publication](https://konvens.org/proceedings/2019/papers/KONVENS2019_paper_4.pdf) for details and below. ### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> The evaluation focuses on NER in historical German documents, see [publication](https://konvens.org/proceedings/2019/papers/KONVENS2019_paper_4.pdf) for details. ### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. --> The performance metrics used in evaluation are precision, recall and F1-score. See [publication](https://konvens.org/proceedings/2019/papers/KONVENS2019_paper_4.pdf) for actual results in terms of these metrics. ## Results See [publication](https://konvens.org/proceedings/2019/papers/KONVENS2019_paper_4.pdf). # Model Examination See [publication](https://konvens.org/proceedings/2019/papers/KONVENS2019_paper_4.pdf). # Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** V100 - **Hours used:** Roughly 1-2 week(s) for pre-training. Roughly 1 hour for final NER-training. - **Cloud Provider:** No cloud. - **Compute Region:** Germany. - **Carbon Emitted:** More information needed # Technical Specifications [optional] ## Model Architecture and Objective See original BERT publication. ## Compute Infrastructure Training and pre-training have been performed on a single V100. ### Hardware See above.
### Software See published code on [GitHub](https://github.com/qurator-spk/sbb_ner). # Citation <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** ```bibtex @article{labusch_bert_2019, title = {{BERT} for {Named} {Entity} {Recognition} in {Contemporary} and {Historical} {German}}, volume = {Conference on Natural Language Processing}, url = {https://konvens.org/proceedings/2019/papers/KONVENS2019_paper_4.pdf}, abstract = {We apply a pre-trained transformer based representational language model, i.e. BERT (Devlin et al., 2018), to named entity recognition (NER) in contemporary and historical German text and observe state of the art performance for both text categories. We further improve the recognition performance for historical German by unsupervised pre-training on a large corpus of historical German texts of the Berlin State Library and show that best performance for historical German is obtained by unsupervised pre-training on historical German plus supervised pre-training with contemporary NER ground-truth.}, language = {en}, author = {Labusch, Kai and Neudecker, Clemens and Zellhöfer, David}, year = {2019}, pages = {9}, } ``` **APA:** (Labusch et al., 2019) # Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> More information needed. # More Information [optional] In addition to what has been documented above, it should be noted that there are two NER Ground Truth datasets available: 1) [Data provided for the 2020 HIPE campaign on named entity processing](https://impresso.github.io/CLEF-HIPE-2020/) 2) [Data provided for the 2022 HIPE shared task on named entity processing](https://hipe-eval.github.io/HIPE-2022/) Furthermore, two papers have been published on NER/EL, using BERT: 1) [Entity Linking in Multilingual Newspapers and Classical Commentaries with BERT](http://ceur-ws.org/Vol-3180/paper-85.pdf) 2) [Named Entity Disambiguation and Linking Historic Newspaper OCR with BERT](http://ceur-ws.org/Vol-2696/paper_163.pdf) # Model Card Authors [optional] <!-- This section provides another layer of transparency and accountability. Whose views is this model card representing? How many voices were included in its construction? Etc. --> [Kai Labusch]([email protected]) and [Jörg Lehmann]([email protected]) # Model Card Contact Questions and comments about the model can be directed to Kai Labusch at [email protected], questions and comments about the model card can be directed to Jörg Lehmann at [email protected] # How to Get Started with the Model How to get started with this model is explained in the ReadMe file of the GitHub repository [over here](https://github.com/qurator-spk/sbb_ner).
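As a quick-start sketch complementing the GitHub ReadMe, the model can typically be used through the `transformers` NER pipeline. The Hub id `SBB/sbb_ner` is an assumption, and the sample sentence is arbitrary.

```python
from transformers import pipeline

# "SBB/sbb_ner" is the assumed Hub id of this checkpoint; adjust if the repo is named differently.
ner = pipeline(
    "ner",
    model="SBB/sbb_ner",
    tokenizer="SBB/sbb_ner",
    aggregation_strategy="simple",  # merge word pieces into whole entity spans
)

text = "Wilhelm von Humboldt gründete 1810 die Universität zu Berlin."
for entity in ner(text):
    print(entity["entity_group"], entity["word"], round(entity["score"], 3))
```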
AdapterHub/bert-base-uncased-pf-comqa
[ "bert", "en", "dataset:com_qa", "arxiv:2104.08247", "adapter-transformers", "question-answering" ]
question-answering
{ "architectures": null, "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: distilroberta-base-wiki results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilroberta-base-wiki This model is a fine-tuned version of [distilroberta-base](https://huggingface.co/distilroberta-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.0961 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 2.4333 | 1.0 | 1223 | 2.1885 | | 2.3107 | 2.0 | 2446 | 2.1508 | | 2.2385 | 3.0 | 3669 | 2.0961 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0+cu113 - Datasets 2.2.1 - Tokenizers 0.12.1
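Since this is a masked-language model, a hedged inference sketch with the `fill-mask` pipeline is given below; the Hub id is a placeholder because the card does not state the repo's namespace.

```python
from transformers import pipeline

# Replace the placeholder with this checkpoint's actual Hub id.
fill_mask = pipeline("fill-mask", model="<namespace>/distilroberta-base-wiki")

# RoBERTa-style models use "<mask>" as the mask token.
for prediction in fill_mask("The capital of France is <mask>."):
    print(f"{prediction['score']:.3f}  {prediction['sequence']}")
```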
AdapterHub/roberta-base-pf-multirc
[ "roberta", "en", "arxiv:2104.08247", "adapter-transformers", "text-classification", "adapterhub:rc/multirc" ]
text-classification
{ "architectures": null, "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
2022-05-12T16:50:44Z
--- tags: - conversational --- # Harry Potter DialoGPT Model
AdapterHub/roberta-base-pf-race
[ "roberta", "en", "dataset:race", "arxiv:2104.08247", "adapter-transformers", "adapterhub:rc/race" ]
null
{ "architectures": null, "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 292.81 +/- 15.85 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
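Until the usage section above is filled in, the sketch below shows the usual way such a checkpoint is loaded with `huggingface_sb3`; the repo id and file name are placeholders taken from the common naming convention, not values confirmed by this card.

```python
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

# repo_id and filename are placeholders; use the values shown on this repo's "Files" tab.
checkpoint = load_from_hub(repo_id="<namespace>/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)

env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10, deterministic=True)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```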
Ahmad/parsT5-base
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
25
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: distilbert-base-uncased-finetuned-jumbling-squad-15 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-jumbling-squad-15 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the squad dataset. It achieves the following results on the evaluation set: - Loss: 1.3345 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 7e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 1.3629 | 1.0 | 5532 | 1.3345 | ### Framework versions - Transformers 4.19.0 - Pytorch 1.11.0+cu113 - Datasets 2.2.1 - Tokenizers 0.12.1
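A hedged inference sketch with the `question-answering` pipeline is shown below; the Hub id is a placeholder since the card does not state the repo's namespace, and the context/question pair is arbitrary.

```python
from transformers import pipeline

# Replace the placeholder with this checkpoint's actual Hub id.
qa = pipeline(
    "question-answering",
    model="<namespace>/distilbert-base-uncased-finetuned-jumbling-squad-15",
)

context = (
    "The Apollo program was carried out by NASA and achieved the first crewed "
    "Moon landing in 1969 with the Apollo 11 mission."
)
result = qa(question="Which mission first landed humans on the Moon?", context=context)
print(result["answer"], round(result["score"], 3))
```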
Ahmad/parsT5
[ "jax", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- license: apache-2.0 tags: - translation - generated_from_trainer metrics: - bleu model-index: - name: en_zu_ukuxhumana_model results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # en_zu_ukuxhumana_model This model is a fine-tuned version of [Helsinki-NLP/opus-mt-en-mul](https://huggingface.co/Helsinki-NLP/opus-mt-en-mul) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.0772 - Bleu: 7.6322 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results ### Framework versions - Transformers 4.16.2 - Pytorch 1.10.2 - Datasets 1.18.3 - Tokenizers 0.11.0
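A hedged inference sketch is shown below. The Hub id is a placeholder, and because the base model `Helsinki-NLP/opus-mt-en-mul` is multilingual, a target-language token (assumed here to be `>>zul<<` for isiZulu) is prepended to the source text.

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Placeholder Hub id; replace with the actual namespace of this fine-tuned checkpoint.
repo = "<namespace>/en_zu_ukuxhumana_model"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForSeq2SeqLM.from_pretrained(repo)

# ">>zul<<" is the assumed target-language token inherited from opus-mt-en-mul.
batch = tokenizer([">>zul<< Good morning, how are you today?"], return_tensors="pt")
outputs = model.generate(**batch, max_length=64)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
```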
Ahmedahmed/Wewe
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- datasets: - SPGISpeech language: - en license: mit tags: - k2 - icefall --- # SPGISpeech SPGISpeech consists of 5,000 hours of recorded company earnings calls and their respective transcriptions. The original calls were split into slices ranging from 5 to 15 seconds in length to allow easy training for speech recognition systems. Calls represent a broad cross-section of international business English; SPGISpeech contains approximately 50,000 speakers, one of the largest numbers of any speech corpus, and offers a variety of L1 and L2 English accents. The format of each WAV file is single channel, 16kHz, 16 bit audio. Transcription text represents the output of several stages of manual post-processing. As such, the text contains polished English orthography following a detailed style guide, including proper casing, punctuation, and denormalized non-standard words such as numbers and acronyms, making SPGISpeech suited for training fully formatted end-to-end models. Official reference: O’Neill, P.K., Lavrukhin, V., Majumdar, S., Noroozi, V., Zhang, Y., Kuchaiev, O., Balam, J., Dovzhenko, Y., Freyberg, K., Shulman, M.D., Ginsburg, B., Watanabe, S., & Kucsko, G. (2021). SPGISpeech: 5, 000 hours of transcribed financial audio for fully formatted end-to-end speech recognition. ArXiv, abs/2104.02014. ArXiv link: https://arxiv.org/abs/2104.02014 ## Performance Record | Decoding method | val | |---------------------------|------------| | greedy search | 2.40 | | beam search | 2.24 | | modified beam search | 2.30 | | fast beam search | 2.35 |
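The values above appear to be word error rates (in percent) on the val split. As a rough illustration of the metric only (the icefall recipes compute it internally), WER between a reference and a hypothesis transcript can be computed with the `jiwer` package:

```python
# Hedged sketch: word error rate between a reference transcript and an ASR hypothesis.
from jiwer import wer

reference = "revenue grew eight percent year over year in the third quarter"
hypothesis = "revenue grew eight per cent year over year in the third quarter"

print(f"WER: {wer(reference, hypothesis) * 100:.2f}%")
```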
AimB/mT5-en-kr-opus
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: en license: mit library_name: timm tags: - image-classification - resnet datasets: beans metrics: - acc - f1 --- # my-cool-model-with-card ## Model description This isn't really a model, it's just a test repo to see if the [modelcards](https://github.com/nateraw/modelcards) package works! ## Intended uses & limitations #### How to use ```python # You can include sample code which will be formatted ``` #### Limitations and bias Provide examples of latent issues and potential remediations. ## Training data Describe the data you used to train the model. If you initialized it with pre-trained weights, add a link to the pre-trained model card or repository with description of the pre-training data. ## Training procedure Preprocessing, hardware used, hyperparameters... ## Eval results Provide some evaluation results. ### BibTeX entry and citation info ```bibtex @inproceedings{..., year={2020} } ```
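Since the usage block above is left empty, here is a hedged sketch of what a generic `timm` ResNet classifier looks like; this test repo may not actually contain loadable weights, so the snippet builds a stock model instead.

```python
import timm
import torch

# Hedged sketch: a stock timm ResNet with a 3-class head (the beans dataset has 3 classes).
model = timm.create_model("resnet50", pretrained=True, num_classes=3)
model.eval()

dummy_batch = torch.randn(1, 3, 224, 224)  # one RGB image, 224x224
with torch.no_grad():
    logits = model(dummy_batch)
print(logits.softmax(dim=-1))
```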
Aimendo/Triage
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - generated_from_trainer model-index: - name: t5-small-finetuned-spider results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-small-finetuned-spider This model was trained from scratch on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.1914 - Rouge2 Precision: 0.6349 - Rouge2 Recall: 0.3964 - Rouge2 Fmeasure: 0.4619 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 5 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 15 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge2 Precision | Rouge2 Recall | Rouge2 Fmeasure | |:-------------:|:-----:|:-----:|:---------------:|:----------------:|:-------------:|:---------------:| | 0.2912 | 1.0 | 1120 | 0.2631 | 0.5653 | 0.3537 | 0.4118 | | 0.2967 | 2.0 | 2240 | 0.2465 | 0.5758 | 0.363 | 0.4209 | | 0.3106 | 3.0 | 3360 | 0.2372 | 0.5858 | 0.367 | 0.427 | | 0.2993 | 4.0 | 4480 | 0.2340 | 0.5995 | 0.3791 | 0.4403 | | 0.2702 | 5.0 | 5600 | 0.2204 | 0.6035 | 0.3786 | 0.4401 | | 0.2624 | 6.0 | 6720 | 0.2159 | 0.6094 | 0.3807 | 0.4435 | | 0.2463 | 7.0 | 7840 | 0.2121 | 0.6207 | 0.3911 | 0.4544 | | 0.2427 | 8.0 | 8960 | 0.2053 | 0.6198 | 0.3886 | 0.452 | | 0.2336 | 9.0 | 10080 | 0.2014 | 0.6217 | 0.3871 | 0.4518 | | 0.2256 | 10.0 | 11200 | 0.1980 | 0.6298 | 0.394 | 0.4589 | | 0.2212 | 11.0 | 12320 | 0.1960 | 0.6304 | 0.3936 | 0.4589 | | 0.2141 | 12.0 | 13440 | 0.1962 | 0.63 | 0.3939 | 0.4586 | | 0.2069 | 13.0 | 14560 | 0.1921 | 0.6328 | 0.3942 | 0.4594 | | 0.2096 | 14.0 | 15680 | 0.1915 | 0.632 | 0.3953 | 0.46 | | 0.2115 | 15.0 | 16800 | 0.1914 | 0.6349 | 0.3964 | 0.4619 | ### Framework versions - Transformers 4.19.0 - Pytorch 1.11.0+cu113 - Datasets 2.2.1 - Tokenizers 0.12.1
Ajay191191/autonlp-Test-530014983
[ "pytorch", "bert", "text-classification", "en", "dataset:Ajay191191/autonlp-data-Test", "transformers", "autonlp", "co2_eq_emissions" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
34
null
--- language: zh pipeline_tag: fill-mask widget: - text: "感冒需要吃[MASK]" - text: "人类的[MASK]温是37度" tags: - bert license: apache-2.0 --- ## Chinese DKPLM (Decomposable Knowledge-enhanced Pre-trained Language Model) for the medical domain For Chinese natural language processing in specific domains, we provide **Chinese DKPLM (Decomposable Knowledge-enhanced Pre-trained Language Model)** for the medical domain named **pai-dkplm-bert-zh**, from our AAAI 2021 paper named **DKPLM: Decomposable Knowledge-enhanced Pre-trained Language Model for Natural Language Understanding**. This repository is developed based on the EasyNLP framework: [https://github.com/alibaba/EasyNLP](https://github.com/alibaba/EasyNLP ) developed by the Alibaba PAI team. Please find the DKPLM tutorial here: [DKPLM Tutorial](https://github.com/alibaba/EasyNLP/tree/master/examples/dkplm_pretraining). ## Citation If you find the resource is useful, please cite the following papers in your work. - For the EasyNLP framework: ``` @article{easynlp, title = {EasyNLP: A Comprehensive and Easy-to-use Toolkit for Natural Language Processing}, publisher = {arXiv}, author = {Wang, Chengyu and Qiu, Minghui and Zhang, Taolin and Liu, Tingting and Li, Lei and Wang, Jianing and Wang, Ming and Huang, Jun and Lin, Wei}, url = {https://arxiv.org/abs/2205.00258}, year = {2022} } ``` - For DKPLM: ``` @article{dkplm, title = {DKPLM: Decomposable Knowledge-enhanced Pre-trained Language Model for Natural Language Understanding}, author = {Zhang, Taolin and Wang, Chengyu and Hu, Nan and Qiu, Minghui and Tang, Chengguang and He, Xiaofeng and Huang, Jun}, url = {https://arxiv.org/abs/2112.01047}, publisher = {AAAI}, year = {2021} } ```
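Since this checkpoint is a standard BERT-style masked language model, a minimal inference sketch would look like the following (the repo id is a placeholder, not taken from the card; the widget sentences above are reused as input):

```python
from transformers import pipeline

# Placeholder repo id -- substitute the actual pai-dkplm medical checkpoint on the Hub.
fill_mask = pipeline("fill-mask", model="<pai-dkplm-bert-zh-checkpoint>")

# Mirrors the widget examples: predict the masked token in a Chinese medical sentence.
for pred in fill_mask("人类的[MASK]温是37度", top_k=3):
    print(pred["token_str"], round(pred["score"], 3))
```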
Akbarariza/Anjar
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - en tags: - text2text-generation - pytorch license: "gpl-3.0" datasets: - samsum widget: - text: "Ruben has forgotten what the homework was. Alex tells him to ask the teacher." example_title: "I forgot my homework" - text: "Mac is lost at the zoo. Frank says he is at the gorilla exhibit. Charlie is going to see the minks." example_title: "Very sunny" - text: "Mac has started to date Dennis's mother. Dennis is going to beat him up." example_title: "Not very sunny" --- # Maeve - SAMSum Maeve is a language model that is similar to BART in structure but trained specially using a CAT (Conditionally Adversarial Transformer). This allows the model to learn to create long-form text from short entries with high degrees of control and coherence that are impossible to achieve with traditional transformers. This specific model has been trained on the SAMSum dataset, and can invert summaries into full-length news articles. Feel free to try examples on the right!
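A minimal usage sketch, assuming the checkpoint loads through the standard `text2text-generation` pipeline (the repo id is a placeholder and the generation settings are illustrative only):

```python
from transformers import pipeline

# Placeholder repo id -- substitute the actual Maeve-SAMSum checkpoint.
expander = pipeline("text2text-generation", model="<maeve-samsum-checkpoint>")

summary = "Mac is lost at the zoo. Frank says he is at the gorilla exhibit."
result = expander(summary, do_sample=True, max_length=256)
print(result[0]["generated_text"])  # long-form text expanded from the short summary
```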
AlErysvi/Erys
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: chanifrusydi/bert-finetuned-squad results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # chanifrusydi/bert-finetuned-squad This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 5.4528 - Epoch: 0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 0.0002, 'decay_steps': 11091, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: mixed_float16 ### Training results | Train Loss | Epoch | |:----------:|:-----:| | 5.4528 | 0 | ### Framework versions - Transformers 4.19.0 - TensorFlow 2.8.0 - Datasets 2.2.1 - Tokenizers 0.12.1
AlanDev/dall-e-better
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
# Introduction See https://github.com/k2-fsa/icefall/pull/330 for details. The model includes a random combiner.
AlbertHSU/BertTEST
[ "pytorch" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imdb metrics: - accuracy - f1 model-index: - name: finetuning-sentiment-model-3000-samples results: - task: name: Text Classification type: text-classification dataset: name: imdb type: imdb args: plain_text metrics: - name: Accuracy type: accuracy value: 0.8566666666666667 - name: F1 type: f1 value: 0.8571428571428571 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuning-sentiment-model-3000-samples This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 0.3185 - Accuracy: 0.8567 - F1: 0.8571 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results ### Framework versions - Transformers 4.19.0 - Pytorch 1.11.0+cu113 - Datasets 2.2.1 - Tokenizers 0.12.1
Aleksandar/bert-srb-base-cased-oscar
[ "pytorch", "bert", "fill-mask", "transformers", "generated_from_trainer", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 263.94 +/- 19.22 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
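A minimal loading sketch for the usage section above (the repo id and checkpoint filename are placeholders; the exact values depend on how the agent was uploaded):

```python
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

# Both arguments are placeholders -- fill in the actual repo id and model zip filename.
checkpoint = load_from_hub(repo_id="<user>/<repo>", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)

# Evaluate the agent over a few episodes.
env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10, deterministic=True)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```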
Aleksandar/distilbert-srb-ner-setimes
[ "pytorch", "distilbert", "token-classification", "transformers", "generated_from_trainer", "autotrain_compatible" ]
token-classification
{ "architectures": [ "DistilBertForTokenClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- language: de license: mit --- # German BERT large fine-tuned to predict educational requirements This is a fine-tuned version of the German BERT large language model [deepset/gbert-large](https://huggingface.co/deepset/gbert-large). The multilabel task this model was trained on was to predict education requirements from job ad texts. The dataset used for training is not available to the public. The 7 labels in the task are (in the classification head order): - `'Bachelor'` - `'Berufsausbildung'` - `'Doktorat oder äquivalent'` - `'Höhere Berufsausbildung'` - `'Master'` - `'Sonstiges'` - `'keine Ausbildungserfordernisse'` The number of representatives of these labels in each of the splits (train/test/val) of the dataset is summarized in the following table: | Label name | All data | Training | Validation | Test | |------------|----------|----------|------------|------| | Bachelor | 521 | 365 | 52 | 104 | | Berufsausbildung | 1854 | 1298 | 185 | 371 | | Doktorat oder äquivalent | 38 | 27 | 4 | 7 | | Höhere Berufsausbildung | 564 | 395 | 56 | 113 | | Master | 245 | 171 | 25 | 49 | | Sonstiges | 819 | 573 | 82 | 164 | | keine Ausbildungserfordernisse | 176 | 123 | 18 | 35 | ## Performance Training consisted of [minimizing the binary cross-entropy (BCE)](https://en.wikipedia.org/wiki/Cross_entropy#Cross-entropy_minimization) loss between the model's predictions and the actual labels in the training set. During training, a weighted version of the [label ranking average precision (LRAP)](https://scikit-learn.org/stable/modules/model_evaluation.html#label-ranking-average-precision) was tracked for the testing set. LRAP measures what fraction of higher-ranked labels produced by the model were true labels. To account for the label imbalance, the rankings were weighted so that improperly ranked rare labels are penalized more than their more frequent counterparts. After training was complete, the model with highest weighted LRAP was saved. ``` LRAP: 0.96 ``` # See also: - [deepset/gbert-base](https://huggingface.co/deepset/gbert-base) - [deepset/gbert-large](https://huggingface.co/deepset/gbert-large) - [gonzpen/gbert-base-ft-edu-redux](https://huggingface.co/gonzpen/gbert-base-ft-edu-redux) ## Authors Rodrigo C. G. Pena: `rodrigocgp [at] gmail.com`
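Because the model was trained as a multilabel classifier with a BCE loss, inference should apply a sigmoid (not a softmax) to the logits. A sketch, with a placeholder repo id, an invented example sentence, and an assumed 0.5 decision threshold:

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

repo = "<gbert-large-ft-edu-checkpoint>"  # placeholder repo id
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForSequenceClassification.from_pretrained(repo)

# Classification-head order as listed in the card above.
labels = ["Bachelor", "Berufsausbildung", "Doktorat oder äquivalent",
          "Höhere Berufsausbildung", "Master", "Sonstiges",
          "keine Ausbildungserfordernisse"]

text = "Abgeschlossenes Masterstudium in Informatik oder eine vergleichbare Ausbildung."
inputs = tokenizer(text, return_tensors="pt", truncation=True)
with torch.no_grad():
    probs = torch.sigmoid(model(**inputs).logits).squeeze(0)  # multilabel: sigmoid per label

for label, p in zip(labels, probs.tolist()):
    if p > 0.5:  # 0.5 is an assumed threshold, not specified in the card
        print(label, round(p, 3))
```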
Aleksandar/electra-srb-ner-setimes
[ "pytorch", "electra", "token-classification", "transformers", "generated_from_trainer", "autotrain_compatible" ]
token-classification
{ "architectures": [ "ElectraForTokenClassification" ], "model_type": "electra", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- license: cc-by-nc-sa-4.0 tags: - generated_from_keras_callback model-index: - name: madatnlp/sk-kogptv2-kormath-causal results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # madatnlp/sk-kogptv2-kormath-causal This model is a fine-tuned version of [skt/kogpt2-base-v2](https://huggingface.co/skt/kogpt2-base-v2) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.3184 - Validation Loss: 1.4046 - Epoch: 15 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'learning_rate': 2.2999999e-05, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Epoch | |:----------:|:---------------:|:-----:| | 2.7142 | 1.8683 | 0 | | 1.6077 | 1.4417 | 1 | | 1.2458 | 1.3161 | 2 | | 1.0396 | 1.2704 | 3 | | 0.8848 | 1.2818 | 4 | | 0.7634 | 1.2579 | 5 | | 0.6699 | 1.2724 | 6 | | 0.5948 | 1.2718 | 7 | | 0.5306 | 1.3300 | 8 | | 0.4832 | 1.3377 | 9 | | 0.4401 | 1.3038 | 10 | | 0.4053 | 1.3622 | 11 | | 0.3782 | 1.3577 | 12 | | 0.3550 | 1.3696 | 13 | | 0.3347 | 1.3682 | 14 | | 0.3184 | 1.4046 | 15 | ### Framework versions - Transformers 4.19.1 - TensorFlow 2.8.0 - Datasets 2.2.1 - Tokenizers 0.12.1
Aleksandar/electra-srb-oscar
[ "pytorch", "electra", "fill-mask", "transformers", "generated_from_trainer", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "ElectraForMaskedLM" ], "model_type": "electra", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
2022-05-13T11:30:59Z
--- tags: - generated_from_trainer model-index: - name: vi-finetuned-squad-qa-minilmv2-8 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vi-finetuned-squad-qa-minilmv2-8 This model is a fine-tuned version of [subhasisj/vi-TAPT-MLM-MiniLM](https://huggingface.co/subhasisj/vi-TAPT-MLM-MiniLM) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.3335 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 8 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 5 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 2.1669 | 1.0 | 1424 | 1.4979 | | 1.2377 | 2.0 | 2848 | 1.3259 | | 1.0536 | 3.0 | 4272 | 1.3133 | | 0.9568 | 4.0 | 5696 | 1.3103 | | 0.8859 | 5.0 | 7120 | 1.3335 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.10.2 - Datasets 2.0.0 - Tokenizers 0.11.0
Aleksandra/distilbert-base-uncased-finetuned-squad
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 7.961395091713594e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 27 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5
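As an illustration, these values map onto `transformers.TrainingArguments` roughly as follows (the output directory is a placeholder; the Adam and scheduler settings shown are the library defaults):

```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="distilbert-base-uncased-finetuned-squad",   # placeholder
    learning_rate=7.961395091713594e-05,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=32,
    seed=27,
    num_train_epochs=5,
    lr_scheduler_type="linear",                             # library default
    adam_beta1=0.9, adam_beta2=0.999, adam_epsilon=1e-08,   # library defaults
)
```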
AlekseyKorshuk/horror-scripts
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
19
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - conll2003 metrics: - precision - recall - f1 - accuracy model-index: - name: bert-finetuned-ner results: - task: name: Token Classification type: token-classification dataset: name: conll2003 type: conll2003 args: conll2003 metrics: - name: Precision type: precision value: 0.9324078664683524 - name: Recall type: recall value: 0.9495119488387749 - name: F1 type: f1 value: 0.9408821812724089 - name: Accuracy type: accuracy value: 0.9864308000235474 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-finetuned-ner This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the conll2003 dataset. It achieves the following results on the evaluation set: - Loss: 0.0622 - Precision: 0.9324 - Recall: 0.9495 - F1: 0.9409 - Accuracy: 0.9864 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.0862 | 1.0 | 1756 | 0.0649 | 0.9193 | 0.9371 | 0.9281 | 0.9831 | | 0.0406 | 2.0 | 3512 | 0.0576 | 0.9235 | 0.9472 | 0.9352 | 0.9850 | | 0.0197 | 3.0 | 5268 | 0.0622 | 0.9324 | 0.9495 | 0.9409 | 0.9864 | ### Framework versions - Transformers 4.19.0 - Pytorch 1.11.0+cu113 - Datasets 2.2.1 - Tokenizers 0.12.1
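A minimal inference sketch for a CoNLL-2003-style NER checkpoint like this one (the repo id and input sentence are placeholders):

```python
from transformers import pipeline

# Placeholder repo id -- substitute the fine-tuned NER checkpoint.
ner = pipeline("token-classification",
               model="<bert-finetuned-ner-checkpoint>",
               aggregation_strategy="simple")  # merge word pieces into entity spans

for entity in ner("Hugging Face is based in New York City."):
    print(entity["entity_group"], entity["word"], round(entity["score"], 3))
```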
AlexMaclean/sentence-compression
[ "pytorch", "distilbert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "DistilBertForTokenClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
16
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: bert-base-cased-finetuned-squad results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-cased-finetuned-squad This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the squad dataset. It achieves the following results on the evaluation set: - Loss: 1.0848 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 1.0337 | 1.0 | 5546 | 1.0150 | | 0.7546 | 2.0 | 11092 | 1.0015 | | 0.5537 | 3.0 | 16638 | 1.0848 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0+cu113 - Datasets 2.1.0 - Tokenizers 0.12.1
AlexN/xls-r-300m-fr-0
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "fr", "dataset:mozilla-foundation/common_voice_8_0", "transformers", "mozilla-foundation/common_voice_8_0", "generated_from_trainer", "robust-speech-event", "hf-asr-leaderboard", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- language: en license: mit --- # Fairseq-dense 2.7B - Nerys ## Model Description Fairseq-dense 2.7B-Nerys is a finetune created using Fairseq's MoE dense model. ## Training data The training data contains around 2500 ebooks in various genres (the "Pike" dataset), a CYOA dataset called "CYS" and 50 Asian "Light Novels" (the "Manga-v1" dataset). Most parts of the dataset have been prepended using the following text: `[Genre: <genre1>, <genre2>]` ### How to use You can use this model directly with a pipeline for text generation. This example generates a different sequence each time it's run: ```py >>> from transformers import pipeline >>> generator = pipeline('text-generation', model='KoboldAI/fairseq-dense-2.7B-Nerys') >>> generator("Welcome Captain Janeway, I apologize for the delay.", do_sample=True, min_length=50) [{'generated_text': 'Welcome Captain Janeway, I apologize for the delay."\nIt\'s all right," Janeway said. "I\'m certain that you\'re doing your best to keep me informed of what\'s going on."'}] ``` ### Limitations and Biases Based on known problems with NLP technology, potential relevant factors include bias (gender, profession, race and religion). ### BibTeX entry and citation info ``` Artetxe et al. (2021): Efficient Large Scale Language Modeling with Mixtures of Experts ```
Alexander-Learn/bert-finetuned-squad
[ "pytorch", "tensorboard", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- library_name: stable-baselines3 tags: - BipedalWalker-v3 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 226.04 +/- 113.91 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: BipedalWalker-v3 type: BipedalWalker-v3 --- # **PPO** Agent playing **BipedalWalker-v3** This is a trained model of a **PPO** agent playing **BipedalWalker-v3** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Alexandru/creative_copilot
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit tags: - generated_from_trainer datasets: - squad model-index: - name: roberta-large-initialization-seed-0 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-large-initialization-seed-0 This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 12 - eval_batch_size: 8 - seed: 0 - distributed_type: multi-GPU - num_devices: 2 - total_train_batch_size: 24 - total_eval_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2.0 ### Training results ### Framework versions - Transformers 4.17.0 - Pytorch 1.11.0+cu113 - Datasets 2.0.0 - Tokenizers 0.11.6
Alireza-rw/testbot
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: keras --- ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training Metrics Model history needed ## Model Plot <details> <summary>View Model Plot</summary> ![Model Image](./model.png) </details>
Alireza1044/albert-base-v2-mnli
[ "pytorch", "albert", "text-classification", "en", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
235
null
--- license: apache-2.0 tags: - image-classification - generated_from_trainer datasets: - cifar10 model-index: - name: vit-base-patch16-224-in21k-finetuned-cifar10 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-patch16-224-in21k-finetuned-cifar10 This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the cifar10 dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 1337 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Framework versions - Transformers 4.19.0.dev0 - Pytorch 1.10.1 - Datasets 2.1.0 - Tokenizers 0.12.1
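A minimal inference sketch, assuming the checkpoint works with the standard `image-classification` pipeline (the repo id and image path are placeholders):

```python
from PIL import Image
from transformers import pipeline

# Placeholder repo id -- substitute the fine-tuned ViT checkpoint.
classifier = pipeline("image-classification", model="<vit-cifar10-checkpoint>")

image = Image.open("example.png")  # any RGB image; CIFAR-10 labels include airplane, cat, ship, ...
for pred in classifier(image, top_k=3):
    print(pred["label"], round(pred["score"], 3))
```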
Alireza1044/albert-base-v2-sst2
[ "pytorch", "tensorboard", "albert", "text-classification", "en", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
52
null
--- library_name: stable-baselines3 tags: - BipedalWalker-v3 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 304.88 +/- 2.29 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: BipedalWalker-v3 type: BipedalWalker-v3 --- # **PPO** Agent playing **BipedalWalker-v3** This is a trained model of a **PPO** agent playing **BipedalWalker-v3** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Alireza1044/albert-base-v2-stsb
[ "pytorch", "tensorboard", "albert", "text-classification", "en", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
37
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 201.39 +/- 79.26 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
AndreLiu1225/t5-news-summarizer
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 292.99 +/- 18.45 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
AnonARR/qqp-bert
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
38
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 228.88 +/- 19.90 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
AnonymousSub/EManuals_BERT_squad2.0
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
2022-05-14T13:23:53Z
--- language: en thumbnail: http://www.huggingtweets.com/dnouri/1652535050986/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/479663838896214016/nZtbm6to_400x400.jpeg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Daniel Nouri</div> <div style="text-align: center; font-size: 14px;">@dnouri</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Daniel Nouri. | Data | Daniel Nouri | | --- | --- | | Tweets downloaded | 3224 | | Retweets | 875 | | Short tweets | 147 | | Tweets kept | 2202 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/2d09140r/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @dnouri's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/1sbu4o5b) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/1sbu4o5b/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/dnouri') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
AnonymousSub/EManuals_RoBERTa_wikiqa
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
null
--- license: mit tags: - generated_from_trainer datasets: - conll2003 model-index: - name: bert-to-distilbert-NER results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-to-distilbert-NER This model is a fine-tuned version of [dslim/bert-base-NER](https://huggingface.co/dslim/bert-base-NER) on the conll2003 dataset. It achieves the following results on the evaluation set: - eval_loss: 5.9063 - eval_precision: 0.0120 - eval_recall: 0.0069 - eval_f1: 0.0088 - eval_accuracy: 0.7600 - eval_runtime: 8.6309 - eval_samples_per_second: 376.671 - eval_steps_per_second: 3.012 - epoch: 1.0 - step: 110 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.00023888106906613202 - train_batch_size: 128 - eval_batch_size: 128 - seed: 33 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.19.1 - Pytorch 1.11.0+cu113 - Datasets 2.2.1 - Tokenizers 0.12.1
AnonymousSub/SDR_HF_model_base
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
2022-05-14T13:25:08Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 283.38 +/- 17.68 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
AnonymousSub/SR_cline
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- language: en license: mit --- # Fairseq-dense 13B - Nerys ## Model Description Fairseq-dense 13B-Nerys is a finetune created using Fairseq's MoE dense model. ## Training data The training data contains around 2500 ebooks in various genres (the "Pike" dataset), a CYOA dataset called "CYS" and 50 Asian "Light Novels" (the "Manga-v1" dataset). Most parts of the dataset have been prepended using the following text: `[Genre: <genre1>, <genre2>]` ### How to use You can use this model directly with a pipeline for text generation. This example generates a different sequence each time it's run: ```py >>> from transformers import pipeline >>> generator = pipeline('text-generation', model='KoboldAI/fairseq-dense-13B-Nerys') >>> generator("Welcome Captain Janeway, I apologize for the delay.", do_sample=True, min_length=50) [{'generated_text': 'Welcome Captain Janeway, I apologize for the delay."\nIt\'s all right," Janeway said. "I\'m certain that you\'re doing your best to keep me informed of what\'s going on."'}] ``` ### Limitations and Biases Based on known problems with NLP technology, potential relevant factors include bias (gender, profession, race and religion). ### BibTeX entry and citation info ``` Artetxe et al. (2021): Efficient Large Scale Language Modeling with Mixtures of Experts ```
AnonymousSub/SR_rule_based_hier_triplet_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- tags: - summarization - persian - MBart50 - Abstractive Summarization - generated_from_trainer datasets: - xlsum model-index: - name: mbart-large-50-finetuned-persian results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mbart-large-50-finetuned-persian This model is a fine-tuned version of [facebook/mbart-large-50](https://huggingface.co/facebook/mbart-large-50) on the xlsum dataset. It achieves the following results on the evaluation set: - Loss: 4.1932 - Rouge-1: 26.11 - Rouge-2: 8.11 - Rouge-l: 21.09 - Gen Len: 37.29 - Bertscore: 71.08 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - gradient_accumulation_steps: 8 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 - label_smoothing_factor: 0.1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge-1 | Rouge-2 | Rouge-l | Gen Len | Bertscore | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:-------:|:---------:| | 5.5612 | 1.0 | 1476 | 4.5015 | 17.07 | 3.14 | 13.54 | 47.49 | 66.83 | | 4.3049 | 2.0 | 2952 | 4.1055 | 22.63 | 5.89 | 18.03 | 40.43 | 69.23 | | 3.8154 | 3.0 | 4428 | 3.9822 | 24.57 | 7.15 | 19.74 | 37.35 | 70.36 | | 3.3401 | 4.0 | 5904 | 4.0088 | 25.84 | 7.96 | 20.95 | 37.56 | 70.83 | | 2.8879 | 5.0 | 7380 | 4.1932 | 26.24 | 8.26 | 21.23 | 37.78 | 71.05 | ### Framework versions - Transformers 4.19.1 - Pytorch 1.11.0+cu113 - Datasets 2.2.1 - Tokenizers 0.12.1
AnonymousSub/cline-techqa
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- tags: - conversational --- # DialoGPT with sebastian
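A minimal single-turn chat sketch, following the usual DialoGPT pattern (the repo id and user message are placeholders):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "<dialogpt-sebastian-checkpoint>"  # placeholder repo id
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo)

# Encode one user turn, terminated by the EOS token as DialoGPT expects.
user_input = "Hi, how are you today?"
input_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors="pt")

reply_ids = model.generate(input_ids, max_length=200, pad_token_id=tokenizer.eos_token_id)
reply = tokenizer.decode(reply_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
print(reply)
```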
AnonymousSub/cline
[ "pytorch", "roberta", "transformers" ]
null
{ "architectures": [ "LecbertForPreTraining" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- language: - en tags: - mbart-50 license: apache-2.0 datasets: - SLURP metrics: - accuracy - slu-f1 --- This model is the `mbart-large-50-many-to-many-mmt` model fine-tuned on the text part of the [SLURP](https://github.com/pswietojanski/slurp) spoken language understanding dataset. The scores on the test set are 85.68% and 79.00% for intent accuracy and SLU-F1, respectively.
AnonymousSub/cline_emanuals
[ "pytorch", "roberta", "transformers" ]
null
{ "architectures": [ "LecbertForPreTraining" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: mit tags: - generated_from_trainer datasets: - xtreme metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-de results: - task: name: Token Classification type: token-classification dataset: name: xtreme type: xtreme args: PAN-X.de metrics: - name: F1 type: f1 value: 0.8588964027959312 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-de This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset. It achieves the following results on the evaluation set: - Loss: 0.1383 - F1: 0.8589 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.2631 | 1.0 | 525 | 0.1596 | 0.8218 | | 0.1296 | 2.0 | 1050 | 0.1353 | 0.8479 | | 0.0821 | 3.0 | 1575 | 0.1383 | 0.8589 | ### Framework versions - Transformers 4.11.3 - Pytorch 1.11.0+cu113 - Datasets 1.16.1 - Tokenizers 0.10.3
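As a usage illustration for a token-classification fine-tune like the one described above (not part of the original card), the sketch below runs such a model through the `transformers` NER pipeline; the repository id and the example sentence are placeholders.

```python
# Hedged sketch: run a PAN-X German NER fine-tune via the token-classification
# pipeline. The repo id is a placeholder for wherever the model was pushed.
from transformers import pipeline

ner = pipeline(
    "token-classification",
    model="your-username/xlm-roberta-base-finetuned-panx-de",  # hypothetical
    aggregation_strategy="simple",  # merge sub-word pieces into whole entities
)
print(ner("Jeff Dean arbeitet bei Google in Kalifornien."))
```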
AnonymousSub/consert-emanuals-s10-SR
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
null
--- library_name: stable-baselines3 tags: - FrozenLake-v1 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 0.40 +/- 0.49 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1 type: FrozenLake-v1 --- # **PPO** Agent playing **FrozenLake-v1** This is a trained model of a **PPO** agent playing **FrozenLake-v1** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
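The card leaves its usage section as a TODO; the following is a generic Stable-Baselines3 sketch (not the author's code) that trains and evaluates a PPO agent on FrozenLake-v1 with purely illustrative settings.

```python
# Generic Stable-Baselines3 sketch for PPO on FrozenLake-v1 (illustrative only).
import gym
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

env = gym.make("FrozenLake-v1")
model = PPO("MlpPolicy", env, verbose=0)
model.learn(total_timesteps=100_000)

mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=100)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```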
AnonymousSub/consert-s10-AR
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
31
null
--- datasets: - Matthijs/snacks model-index: - name: matteopilotto/vit-base-patch16-224-in21k-snacks results: - task: type: image-classification name: Image Classification dataset: name: Matthijs/snacks type: Matthijs/snacks config: default split: test metrics: - name: Accuracy type: accuracy value: 0.8928571428571429 verified: true - name: Precision Macro type: precision value: 0.8990033704680036 verified: true - name: Precision Micro type: precision value: 0.8928571428571429 verified: true - name: Precision Weighted type: precision value: 0.8972398709051788 verified: true - name: Recall Macro type: recall value: 0.8914608843537415 verified: true - name: Recall Micro type: recall value: 0.8928571428571429 verified: true - name: Recall Weighted type: recall value: 0.8928571428571429 verified: true - name: F1 Macro type: f1 value: 0.892544821273258 verified: true - name: F1 Micro type: f1 value: 0.8928571428571429 verified: true - name: F1 Weighted type: f1 value: 0.8924168605019522 verified: true - name: loss type: loss value: 0.479541540145874 verified: true --- # Vision Transformer fine-tuned on `Matthijs/snacks` dataset Vision Transformer (ViT) model pre-trained on ImageNet-21k and fine-tuned on [**Matthijs/snacks**](https://huggingface.co/datasets/Matthijs/snacks) for 5 epochs using various data augmentation transformations from `torchvision`. The model achieves **94.97%** and **94.43%** accuracy on the validation and test sets, respectively. ## Data augmentation pipeline The code block below shows the various transformations applied during pre-processing to augment the original dataset. The augmented images were generated on the fly with the `set_transform` method. ```python from transformers import ViTFeatureExtractor from torchvision.transforms import ( Compose, Normalize, Resize, RandomResizedCrop, RandomHorizontalFlip, RandomAdjustSharpness, ToTensor ) checkpoint = 'google/vit-base-patch16-224-in21k' feature_extractor = ViTFeatureExtractor.from_pretrained(checkpoint) # transformations on the training set train_aug_transforms = Compose([ RandomResizedCrop(size=feature_extractor.size), RandomHorizontalFlip(p=0.5), RandomAdjustSharpness(sharpness_factor=5, p=0.5), ToTensor(), Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std), ]) # transformations on the validation/test set valid_aug_transforms = Compose([ Resize(size=(feature_extractor.size, feature_extractor.size)), ToTensor(), Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std), ]) ```
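To complete the picture, the sketch below shows how transforms like those in the card are typically attached with `set_transform` so augmentation happens on the fly; the split names and the "image" column are assumptions about the `Matthijs/snacks` layout rather than details taken from the card.

```python
# Hedged sketch of attaching the card's transforms via datasets.Dataset.set_transform.
# Assumes the train_aug_transforms / valid_aug_transforms pipelines defined above
# and an "image" column holding PIL images.
from datasets import load_dataset

ds = load_dataset("Matthijs/snacks")

def preprocess_train(batch):
    batch["pixel_values"] = [train_aug_transforms(img.convert("RGB")) for img in batch["image"]]
    return batch

def preprocess_eval(batch):
    batch["pixel_values"] = [valid_aug_transforms(img.convert("RGB")) for img in batch["image"]]
    return batch

ds["train"].set_transform(preprocess_train)       # augmented on the fly at access time
ds["validation"].set_transform(preprocess_eval)
ds["test"].set_transform(preprocess_eval)
```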
AnonymousSub/rule_based_only_classfn_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- license: mit tags: - generated_from_trainer datasets: - squad model-index: - name: roberta-large-few-shot-k-32-finetuned-squad-seed-4 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-large-few-shot-k-32-finetuned-squad-seed-4 This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 12 - eval_batch_size: 8 - seed: 4 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - training_steps: 200 ### Training results ### Framework versions - Transformers 4.20.0.dev0 - Pytorch 1.11.0+cu113 - Datasets 2.0.0 - Tokenizers 0.11.6
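This card and the similar few-shot QA cards that follow list their hyperparameters in the same autogenerated form; purely as an illustration (a reconstruction, not the authors' training script), they map onto `TrainingArguments` roughly as follows.

```python
# Illustrative reconstruction of the listed hyperparameters as TrainingArguments;
# not the original training script.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="roberta-large-few-shot-k-32-finetuned-squad-seed-4",
    learning_rate=3e-5,
    per_device_train_batch_size=12,
    per_device_eval_batch_size=8,
    seed=4,
    lr_scheduler_type="linear",
    warmup_ratio=0.1,
    max_steps=200,  # "training_steps: 200" in the card
)
```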
AnonymousSub/rule_based_only_classfn_epochs_1_shard_10
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: splinter-large-few-shot-k-32-finetuned-squad-seed-4 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # splinter-large-few-shot-k-32-finetuned-squad-seed-4 This model is a fine-tuned version of [tau/splinter-large-qass](https://huggingface.co/tau/splinter-large-qass) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 12 - eval_batch_size: 8 - seed: 4 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - training_steps: 200 ### Training results ### Framework versions - Transformers 4.20.0.dev0 - Pytorch 1.11.0+cu113 - Datasets 2.0.0 - Tokenizers 0.11.6
AnonymousSub/rule_based_only_classfn_twostage_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
2022-05-14T20:25:43Z
--- license: mit tags: - generated_from_trainer datasets: - squad model-index: - name: roberta-large-few-shot-k-64-finetuned-squad-seed-2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-large-few-shot-k-64-finetuned-squad-seed-2 This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 12 - eval_batch_size: 8 - seed: 2 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - training_steps: 200 ### Training results ### Framework versions - Transformers 4.20.0.dev0 - Pytorch 1.11.0+cu113 - Datasets 2.0.0 - Tokenizers 0.11.6
AnonymousSub/rule_based_only_classfn_twostage_epochs_1_shard_1_wikiqa
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
2022-05-14T20:30:00Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: splinter-large-few-shot-k-64-finetuned-squad-seed-2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # splinter-large-few-shot-k-64-finetuned-squad-seed-2 This model is a fine-tuned version of [tau/splinter-large-qass](https://huggingface.co/tau/splinter-large-qass) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 12 - eval_batch_size: 8 - seed: 2 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - training_steps: 200 ### Training results ### Framework versions - Transformers 4.20.0.dev0 - Pytorch 1.11.0+cu113 - Datasets 2.0.0 - Tokenizers 0.11.6
AnonymousSub/rule_based_roberta_hier_triplet_epochs_1_shard_10
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- license: mit tags: - generated_from_trainer datasets: - squad model-index: - name: roberta-large-few-shot-k-256-finetuned-squad-seed-4 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-large-few-shot-k-256-finetuned-squad-seed-4 This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 12 - eval_batch_size: 8 - seed: 4 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10.0 ### Training results ### Framework versions - Transformers 4.20.0.dev0 - Pytorch 1.11.0+cu113 - Datasets 2.0.0 - Tokenizers 0.11.6
AnonymousSub/rule_based_roberta_hier_triplet_epochs_1_shard_1_squad2.0
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
2022-05-14T21:53:30Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: splinter-large-few-shot-k-256-finetuned-squad-seed-4 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # splinter-large-few-shot-k-256-finetuned-squad-seed-4 This model is a fine-tuned version of [tau/splinter-large-qass](https://huggingface.co/tau/splinter-large-qass) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 12 - eval_batch_size: 8 - seed: 4 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10.0 ### Training results ### Framework versions - Transformers 4.20.0.dev0 - Pytorch 1.11.0+cu113 - Datasets 2.0.0 - Tokenizers 0.11.6
AnonymousSub/rule_based_roberta_twostage_quadruplet_epochs_1_shard_1
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: mit tags: - generated_from_trainer datasets: - squad model-index: - name: roberta-large-few-shot-k-1024-finetuned-squad-seed-0 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-large-few-shot-k-1024-finetuned-squad-seed-0 This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 12 - eval_batch_size: 8 - seed: 0 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10.0 ### Training results ### Framework versions - Transformers 4.20.0.dev0 - Pytorch 1.11.0+cu113 - Datasets 2.0.0 - Tokenizers 0.11.6
AnonymousSub/rule_based_roberta_twostage_quadruplet_epochs_1_shard_10
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: splinter-large-few-shot-k-1024-finetuned-squad-seed-0 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # splinter-large-few-shot-k-1024-finetuned-squad-seed-0 This model is a fine-tuned version of [tau/splinter-large-qass](https://huggingface.co/tau/splinter-large-qass) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 12 - eval_batch_size: 8 - seed: 0 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10.0 ### Training results ### Framework versions - Transformers 4.20.0.dev0 - Pytorch 1.11.0+cu113 - Datasets 2.0.0 - Tokenizers 0.11.6
AnonymousSub/rule_based_roberta_twostage_quadruplet_epochs_1_shard_1_squad2.0
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- tags: - generated_from_trainer datasets: - hindi_english_machine_translation metrics: - bleu model-index: - name: mbart-large-cc25-ge-en-to-hi results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: hindi_english_machine_translation type: hindi_english_machine_translation args: hi-en metrics: - name: Bleu type: bleu value: 4.5974 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mbart-large-cc25-ge-en-to-hi This model is a fine-tuned version of [facebook/mbart-large-cc25](https://huggingface.co/facebook/mbart-large-cc25) on the hindi_english_machine_translation dataset. It achieves the following results on the evaluation set: - Loss: 1.3397 - Bleu: 4.5974 - Gen Len: 66.244 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len | |:-------------:|:-----:|:------:|:---------------:|:------:|:-------:| | 1.4602 | 1.0 | 135739 | 1.3397 | 4.5974 | 66.244 | ### Framework versions - Transformers 4.19.1 - Pytorch 1.11.0+cu102 - Datasets 1.18.0 - Tokenizers 0.12.1
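As a hedged usage sketch for an mBART-cc25 English-to-Hindi fine-tune like the one above (not code from the card), the snippet below follows the usual mBART recipe of forcing the target-language token at decoding time; the repository id is a placeholder.

```python
# Hedged inference sketch for an mBART-cc25 En->Hi translation fine-tune.
# The repo id is a placeholder; language handling follows the standard mBART recipe.
from transformers import MBartForConditionalGeneration, MBartTokenizer

checkpoint = "your-username/mbart-large-cc25-ft-en-to-hi"  # hypothetical
tokenizer = MBartTokenizer.from_pretrained(checkpoint, src_lang="en_XX", tgt_lang="hi_IN")
model = MBartForConditionalGeneration.from_pretrained(checkpoint)

batch = tokenizer("The weather is nice today.", return_tensors="pt")
generated = model.generate(
    **batch,
    decoder_start_token_id=tokenizer.lang_code_to_id["hi_IN"],
    num_beams=4,
)
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])
```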
AnonymousSub/rule_based_roberta_twostagequadruplet_hier_epochs_1_shard_1
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: splinter-large-few-shot-k-1024-finetuned-squad-seed-2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # splinter-large-few-shot-k-1024-finetuned-squad-seed-2 This model is a fine-tuned version of [tau/splinter-large-qass](https://huggingface.co/tau/splinter-large-qass) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 12 - eval_batch_size: 8 - seed: 2 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10.0 ### Training results ### Framework versions - Transformers 4.20.0.dev0 - Pytorch 1.11.0+cu113 - Datasets 2.0.0 - Tokenizers 0.11.6
AnonymousSub/rule_based_roberta_twostagequadruplet_hier_epochs_1_shard_10
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- license: mit tags: - generated_from_trainer datasets: - squad model-index: - name: roberta-large-few-shot-k-1024-finetuned-squad-seed-4 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-large-few-shot-k-1024-finetuned-squad-seed-4 This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 12 - eval_batch_size: 8 - seed: 4 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10.0 ### Training results ### Framework versions - Transformers 4.20.0.dev0 - Pytorch 1.11.0+cu113 - Datasets 2.0.0 - Tokenizers 0.11.6
AnonymousSub/rule_based_roberta_twostagequadruplet_hier_epochs_1_shard_1_squad2.0
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: splinter-large-few-shot-k-1024-finetuned-squad-seed-4 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # splinter-large-few-shot-k-1024-finetuned-squad-seed-4 This model is a fine-tuned version of [tau/splinter-large-qass](https://huggingface.co/tau/splinter-large-qass) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 12 - eval_batch_size: 8 - seed: 4 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10.0 ### Training results ### Framework versions - Transformers 4.20.0.dev0 - Pytorch 1.11.0+cu113 - Datasets 2.0.0 - Tokenizers 0.11.6
AnonymousSub/rule_based_twostage_quadruplet_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- license: mit tags: - generated_from_trainer datasets: - squad model-index: - name: roberta-large-houlsby-few-shot-k-16-finetuned-squad-seed-2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-large-houlsby-few-shot-k-16-finetuned-squad-seed-2 This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 64 - eval_batch_size: 128 - seed: 2 - distributed_type: multi-GPU - num_devices: 2 - total_train_batch_size: 128 - total_eval_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - training_steps: 75 ### Training results ### Framework versions - Transformers 4.17.0 - Pytorch 1.11.0+cu113 - Datasets 2.0.0 - Tokenizers 0.11.6
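The "houlsby" in this and the following checkpoint names refers to Houlsby-style bottleneck adapters. As background only (not taken from the card), the sketch below shows the core adapter block: a down-projection, a non-linearity, an up-projection, and a residual connection, trained while the backbone stays frozen.

```python
# Background sketch of a Houlsby-style bottleneck adapter block (illustrative;
# not the exact module used for these checkpoints).
import torch
import torch.nn as nn

class BottleneckAdapter(nn.Module):
    def __init__(self, hidden_size: int = 1024, bottleneck: int = 64):
        super().__init__()
        self.down = nn.Linear(hidden_size, bottleneck)  # project down
        self.act = nn.GELU()
        self.up = nn.Linear(bottleneck, hidden_size)    # project back up

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # residual connection keeps the frozen backbone's representation intact
        return hidden_states + self.up(self.act(self.down(hidden_states)))

adapter = BottleneckAdapter()
x = torch.randn(2, 16, 1024)  # (batch, seq_len, hidden) -- roberta-large width
print(adapter(x).shape)       # torch.Size([2, 16, 1024])
```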
AnonymousSub/rule_based_twostagetriplet_hier_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: mit tags: - generated_from_trainer datasets: - squad model-index: - name: roberta-large-houlsby-few-shot-k-64-finetuned-squad-seed-0 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-large-houlsby-few-shot-k-64-finetuned-squad-seed-0 This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 64 - eval_batch_size: 128 - seed: 0 - distributed_type: multi-GPU - num_devices: 2 - total_train_batch_size: 128 - total_eval_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - training_steps: 75 ### Training results ### Framework versions - Transformers 4.17.0 - Pytorch 1.11.0+cu113 - Datasets 2.0.0 - Tokenizers 0.11.6
AnonymousSub/rule_based_twostagetriplet_hier_epochs_1_shard_1_wikiqa
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
--- license: mit tags: - generated_from_trainer datasets: - squad model-index: - name: roberta-large-houlsby-few-shot-k-64-finetuned-squad-seed-2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-large-houlsby-few-shot-k-64-finetuned-squad-seed-2 This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 64 - eval_batch_size: 128 - seed: 2 - distributed_type: multi-GPU - num_devices: 2 - total_train_batch_size: 128 - total_eval_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - training_steps: 75 ### Training results ### Framework versions - Transformers 4.17.0 - Pytorch 1.11.0+cu113 - Datasets 2.0.0 - Tokenizers 0.11.6
AnonymousSub/specter-bert-model
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - xsum model-index: - name: t5-small-finetuned-xsum results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-small-finetuned-xsum This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the xsum dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.19.1 - Pytorch 1.11.0+cu113 - Datasets 2.2.1 - Tokenizers 0.12.1
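As a hedged usage sketch (not from the card), a t5-small XSum fine-tune is usually queried with the conventional "summarize: " task prefix; the repository id below is a placeholder.

```python
# Hedged inference sketch for a t5-small XSum fine-tune; repo id is a placeholder.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

checkpoint = "your-username/t5-small-finetuned-xsum"  # hypothetical
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

text = "summarize: " + "The full news article goes here ..."
inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
ids = model.generate(**inputs, num_beams=4, max_length=60)
print(tokenizer.decode(ids[0], skip_special_tokens=True))
```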
AnonymousSub/specter-emanuals-model
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- license: mit tags: - generated_from_trainer datasets: - squad model-index: - name: roberta-large-houlsby-few-shot-k-128-finetuned-squad-seed-0 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-large-houlsby-few-shot-k-128-finetuned-squad-seed-0 This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 12 - eval_batch_size: 8 - seed: 0 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - training_steps: 400 ### Training results ### Framework versions - Transformers 4.17.0 - Pytorch 1.11.0+cu113 - Datasets 2.0.0 - Tokenizers 0.11.6
AnonymousSub/unsup-consert-base
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- license: mit tags: - generated_from_trainer datasets: - squad model-index: - name: roberta-large-houlsby-few-shot-k-128-finetuned-squad-seed-2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-large-houlsby-few-shot-k-128-finetuned-squad-seed-2 This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 12 - eval_batch_size: 8 - seed: 2 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - training_steps: 400 ### Training results ### Framework versions - Transformers 4.17.0 - Pytorch 1.11.0+cu113 - Datasets 2.0.0 - Tokenizers 0.11.6
Anonymreign/savagebeta
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit tags: - generated_from_trainer datasets: - squad model-index: - name: roberta-large-houlsby-few-shot-k-128-finetuned-squad-seed-4 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-large-houlsby-few-shot-k-128-finetuned-squad-seed-4 This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 12 - eval_batch_size: 8 - seed: 4 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - training_steps: 400 ### Training results ### Framework versions - Transformers 4.17.0 - Pytorch 1.11.0+cu113 - Datasets 2.0.0 - Tokenizers 0.11.6
Anorak/nirvana
[ "pytorch", "pegasus", "text2text-generation", "unk", "dataset:Anorak/autonlp-data-Niravana-test2", "transformers", "autonlp", "co2_eq_emissions", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "PegasusForConditionalGeneration" ], "model_type": "pegasus", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: mit tags: - generated_from_trainer datasets: - squad model-index: - name: roberta-large-houlsby-few-shot-k-256-finetuned-squad-seed-0 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-large-houlsby-few-shot-k-256-finetuned-squad-seed-0 This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 12 - eval_batch_size: 8 - seed: 0 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 20.0 ### Training results ### Framework versions - Transformers 4.17.0 - Pytorch 1.11.0+cu113 - Datasets 2.0.0 - Tokenizers 0.11.6
AnthonyNelson/DialoGPT-small-ricksanchez
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- license: mit tags: - generated_from_trainer datasets: - squad model-index: - name: roberta-large-houlsby-few-shot-k-256-finetuned-squad-seed-2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-large-houlsby-few-shot-k-256-finetuned-squad-seed-2 This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 12 - eval_batch_size: 8 - seed: 2 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 20.0 ### Training results ### Framework versions - Transformers 4.17.0 - Pytorch 1.11.0+cu113 - Datasets 2.0.0 - Tokenizers 0.11.6
Anthos23/distilbert-base-uncased-finetuned-sst2
[ "tf", "tensorboard", "distilbert", "text-classification", "transformers", "generated_from_keras_callback", "license:apache-2.0" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
21
null
--- license: mit tags: - generated_from_trainer datasets: - squad model-index: - name: roberta-large-houlsby-few-shot-k-256-finetuned-squad-seed-4 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-large-houlsby-few-shot-k-256-finetuned-squad-seed-4 This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 12 - eval_batch_size: 8 - seed: 4 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 20.0 ### Training results ### Framework versions - Transformers 4.17.0 - Pytorch 1.11.0+cu113 - Datasets 2.0.0 - Tokenizers 0.11.6
gaurishhs/API
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-05-15T04:52:31Z
--- license: mit tags: - generated_from_trainer datasets: - squad model-index: - name: roberta-large-houlsby-few-shot-k-512-finetuned-squad-seed-0 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-large-houlsby-few-shot-k-512-finetuned-squad-seed-0 This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 12 - eval_batch_size: 8 - seed: 0 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 20.0 ### Training results ### Framework versions - Transformers 4.17.0 - Pytorch 1.11.0+cu113 - Datasets 2.0.0 - Tokenizers 0.11.6
ArashEsk95/bert-base-uncased-finetuned-cola
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: roberta-base-bne-finetuned-recores results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base-bne-finetuned-recores This model is a fine-tuned version of [PlanTL-GOB-ES/roberta-base-bne](https://huggingface.co/PlanTL-GOB-ES/roberta-base-bne) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 3.1113 - Accuracy: 0.4601 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.5294 | 1.0 | 1047 | 1.4094 | 0.4242 | | 0.6886 | 2.0 | 2094 | 2.1629 | 0.4545 | | 0.0779 | 3.0 | 3141 | 2.3083 | 0.4545 | | 0.0103 | 4.0 | 4188 | 3.0327 | 0.4628 | | 0.0019 | 5.0 | 5235 | 3.1113 | 0.4601 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.10.1+cu102 - Datasets 2.2.1 - Tokenizers 0.12.1
Aron/distilbert-base-uncased-finetuned-emotion
[ "pytorch", "tensorboard", "distilbert", "text-classification", "dataset:emotion", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
36
null
--- library_name: stable-baselines3 tags: - CarRacing-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: -54.39 +/- 20.08 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: CarRacing-v0 type: CarRacing-v0 --- # **PPO** Agent playing **CarRacing-v0** This is a trained model of a **PPO** agent playing **CarRacing-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
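This card's usage section is also a TODO; one common pattern (an assumption, not the author's code) is to pull the saved agent from the Hub with `huggingface_sb3` and reload it with Stable-Baselines3. The repository id and file name below are placeholders.

```python
# Hedged sketch: load a PPO CarRacing-v0 agent from the Hub with huggingface_sb3.
# Repo id and filename are placeholders for the actual uploaded artifacts.
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

checkpoint = load_from_hub(
    repo_id="your-username/ppo-CarRacing-v0",  # hypothetical
    filename="ppo-CarRacing-v0.zip",           # hypothetical
)
model = PPO.load(checkpoint)

env = gym.make("CarRacing-v0")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```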
Augustvember/WokkaBot2
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-05-15T11:59:45Z
--- library_name: stable-baselines3 tags: - MountainCar-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - metrics: - type: mean_reward value: -102.50 +/- 5.73 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: MountainCar-v0 type: MountainCar-v0 --- # **DQN** Agent playing **MountainCar-v0** This is a trained model of a **DQN** agent playing **MountainCar-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
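A minimal evaluation sketch, assuming a `huggingface_sb3`-style upload; the repo id and filename below are placeholders rather than values taken from this card:

```python
# Sketch only: download the DQN checkpoint and estimate its mean episode reward.
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import DQN
from stable_baselines3.common.evaluation import evaluate_policy

checkpoint = load_from_hub(
    repo_id="your-username/dqn-MountainCar-v0",  # hypothetical repo id
    filename="dqn-MountainCar-v0.zip",           # hypothetical filename
)
model = DQN.load(checkpoint)

eval_env = gym.make("MountainCar-v0")
mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=10, deterministic=True)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```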
Augustvember/WokkaBot4
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-05-15T12:00:30Z
--- library_name: stable-baselines3 tags: - MountainCar-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - metrics: - type: mean_reward value: -100.70 +/- 7.47 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: MountainCar-v0 type: MountainCar-v0 --- # **DQN** Agent playing **MountainCar-v0** This is a trained model of a **DQN** agent playing **MountainCar-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Augustvember/WokkaBotF
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-05-30T18:01:51Z
--- language: da license: mit datasets: - netarkivet_text_v1 - danews_v1 - hopetwitter_v1 - DDSC/dagw_reddit_filtered_v1.0.0 library_name: jax pipeline_tag: fill-mask --- # Model Card Following [1], the following constitutes a model card for this model. --- *Organization developing the Model*: The Danish Foundation Models project *Model Creation Date*: June 2022 *Model Type*: Transformer encoder model [2]; BERT [3] *Feedback on the Model*: For feedback on the model please use the [community forum](https://huggingface.co/chcaa/dfm-bert-base-v1/discussions). *Training logs and performance metrics*: Check out this Weights & Biases [dashboard](https://wandb.ai/chcaa/danish-foundation-models/reports/dfm-bert-base-v1--VmlldzoyODkwMzc2). ## Intended Uses *Primary Intended Uses*: The primary intended use case of this model is the reproduction and validation of dataset quality. The intended use cases for future iterations of this model are the application in industry and research for Danish natural language tasks. *Primary Intended Users*: Future iterations of the model are intended for NLP practitioners dealing with Danish text documents. *Out-of-Scope Uses*: Use of the model for profiling in a way which is inconsiderate of the potential harm it might cause, such as racial profiling. ## Factors *Card prompts - Relevant Factors*: Relevant factors include which language is used. Our model is trained on a Danish text corpus and is intended to be used for comparing training datasets. *Card prompts - Evaluation Factors*: Future iterations of this model should include a validation of biases pertaining to gender, race, and religious and social groups. ## Metrics *Performance Metrics*: Our model is evaluated on the following performance metrics: - Pseudo perplexity, following [4], across eight distinct domains, including Danish dialects, books, legal text, social media (Reddit, Twitter), spontaneous speech, news, and Wikipedia. - The Danish subsection of ScandEval [6]. To see the performance metrics, check out this Weights & Biases [dashboard](https://wandb.ai/chcaa/danish-foundation-models/reports/dfm-bert-base-v1--VmlldzoyODkwMzc2). *Decision Threshold*: N/A *Approaches to Uncertainty and Variability*: Due to the cost of training, the model is only pre-trained once, but ScandEval fine-tunes it ten times to obtain a reasonable estimate of model performance. ## Evaluation Data *Datasets*: ScandEval's Danish benchmark includes: - Named entity recognition on DaNE [7,8]. - Part-of-speech tagging and dependency parsing on DDT [8]. - Sentiment classification on AngryTweets [9], TwitterSent [9], Europarl [9], and LCC [10]. - Hate speech classification on DKHate [11]. *Motivation*: The ScandEval benchmark is the most comprehensive benchmark for Danish. Pseudo perplexity was analysed to examine the model's ability to model certain language domains. ## Training Data For our training data, we sample from HopeTwitter, DaNews, [DAGW](https://huggingface.co/datasets/DDSC/dagw_reddit_filtered_v1.0.0) and Netarkivet Text (NAT) with the probabilities 0.10, 0.10, 0.10, and 0.70, respectively. For more information on the training and datasets, see the respective datasheets on the Danish Foundation Models [GitHub page](https://github.com/centre-for-humanities-computing/danish-foundation-models). *Pre-processing*: Input documents are tokenized using the tokenizer of the Danish BERT by BotXO [12], which uses a BPE with a vocabulary size of ~30,000 and NFKC normalization.
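A minimal fill-mask sketch, assuming the Flax weights at `chcaa/dfm-bert-base-v1` (the repo id from the feedback link above) can be loaded into PyTorch with `from_flax=True` and that the checkpoint exposes a standard BERT masked-LM head:

```python
# Sketch only: the repo lists library_name: jax, so the Flax weights are
# converted on the fly with from_flax=True (requires flax to be installed).
from transformers import AutoTokenizer, BertForMaskedLM, pipeline

model_id = "chcaa/dfm-bert-base-v1"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = BertForMaskedLM.from_pretrained(model_id, from_flax=True)

fill_mask = pipeline("fill-mask", model=model, tokenizer=tokenizer)
print(fill_mask(f"Danmark er et {tokenizer.mask_token} i Skandinavien."))
```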
## Ethical Considerations *Data*: The data is sourced from News, DAGW, Twitter, and Netarkivet Text (NAT) and might thus contain hate speech, sexually explicit content, and otherwise harmful content. *Mitigations*: We considered removing sexually explicit content by filtering web domains using a DNS filter or Google SafeSearch. However, on examining the filtered domains, these were also found to include news media pertaining to a specific demographic (e.g. Dagens.dk) and educational sites pertaining to sexual education. We also examined the use of word-based filters, but found that they might influence certain demographic groups disproportionately. *Risk and Harms*: As Netarkivet Text covers such a wide array of the Danish internet, it undoubtedly contains personal information. To avoid model memorization of this information, we have deduplicated the data such that the model does not learn it. # References: - [1] Mitchell, M., Wu, S., Zaldivar, A., Barnes, P., Vasserman, L., Hutchinson, B., Spitzer, E., Raji, I. D., & Gebru, T. (2019). Model Cards for Model Reporting. Proceedings of the Conference on Fairness, Accountability, and Transparency, 220–229. https://doi.org/10.1145/3287560.3287596 - [2] Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, L., & Polosukhin, I. (2017). Attention Is All You Need. ArXiv:1706.03762 [Cs]. http://arxiv.org/abs/1706.03762 - [3] Devlin, J., Chang, M.-W., Lee, K., & Toutanova, K. (2019). BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. ArXiv:1810.04805 [Cs]. http://arxiv.org/abs/1810.04805 - [4] Salazar, J., Liang, D., Nguyen, T. Q., & Kirchhoff, K. (2020). Masked Language Model Scoring. Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, 2699–2712. https://doi.org/10.18653/v1/2020.acl-main.240 - [6] Nielsen, D. S. (2021). ScandEval: Evaluation of language models on mono- or multilingual Scandinavian language tasks. GitHub. https://github.com/saattrupdan/ScandEval - [7] Hvingelby, R., Pauli, A. B., Barrett, M., Rosted, C., Lidegaard, L. M., & Søgaard, A. (2020). DaNE: A named entity resource for Danish. Proceedings of the 12th Language Resources and Evaluation Conference, 4597–4604. - [8] Kromann, M. T. (2003). The Danish Dependency Treebank and the DTAG Treebank Tool. https://research.cbs.dk/en/publications/the-danish-dependency-treebank-and-the-dtag-treebank-tool - [9] Alexandrainst/danlp. (2022). Alexandra Institute. https://github.com/alexandrainst/danlp/blob/a1e9fa70fc5a3ae7ff78877062da3a8a8da80758/docs/docs/datasets.md (Original work published 2019) - [10] Nielsen, F. Å. (2022). Lcc-sentiment. https://github.com/fnielsen/lcc-sentiment (Original work published 2016) - [11] Sigurbergsson, G. I., & Derczynski, L. (2020). Offensive Language and Hate Speech Detection for Danish. Proceedings of the 12th Language Resources and Evaluation Conference, 3498–3508. https://aclanthology.org/2020.lrec-1.430 - [12] Møllerhøj, J. D. (2019, December 5). Danish BERT model: BotXO has trained the most advanced BERT model. BotXO. https://www.botxo.ai/blog/danish-bert-model/
Augustvember/wokka
[ "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
2022-05-15T13:40:01Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - emotion metrics: - accuracy - f1 model-index: - name: distilbert-base-uncased-finetuned-emotion results: - task: name: Text Classification type: text-classification dataset: name: emotion type: emotion args: default metrics: - name: Accuracy type: accuracy value: 0.9235 - name: F1 type: f1 value: 0.9235106231638174 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2064 - Accuracy: 0.9235 - F1: 0.9235 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.8272 | 1.0 | 250 | 0.2939 | 0.917 | 0.9153 | | 0.2414 | 2.0 | 500 | 0.2064 | 0.9235 | 0.9235 | ### Framework versions - Transformers 4.20.0.dev0 - Pytorch 1.11.0+cu102 - Datasets 2.2.1 - Tokenizers 0.12.1
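A minimal inference sketch; the repo id below is a placeholder, since the card does not state where the checkpoint is published:

```python
# Sketch only: classify the emotion of a sentence with the fine-tuned checkpoint.
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="your-username/distilbert-base-uncased-finetuned-emotion",  # hypothetical repo id
)
print(classifier("I can't believe how happy this made me!"))
```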
Aurora/asdawd
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-05-15T14:10:23Z
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: wav2vec2-arabic-gpu-colab-similar-to-german-bigger-warm-up results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-arabic-gpu-colab-similar-to-german-bigger-warm-up This model is a fine-tuned version of [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.6370 - Wer: 0.4146 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 2 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 6 - total_train_batch_size: 12 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 5000 - num_epochs: 40 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 9.4958 | 2.83 | 400 | 3.4822 | 1.0 | | 3.2281 | 5.67 | 800 | 2.9404 | 1.0 | | 2.942 | 8.51 | 1200 | 2.8690 | 1.0 | | 2.6346 | 11.35 | 1600 | 1.5452 | 0.9994 | | 1.3472 | 14.18 | 2000 | 0.8261 | 0.6853 | | 0.8972 | 17.02 | 2400 | 0.6812 | 0.5737 | | 0.6924 | 19.85 | 2800 | 0.6552 | 0.5291 | | 0.5687 | 22.69 | 3200 | 0.6108 | 0.4909 | | 0.4734 | 25.53 | 3600 | 0.5877 | 0.4674 | | 0.4029 | 28.37 | 4000 | 0.6204 | 0.4662 | | 0.3483 | 31.2 | 4400 | 0.5932 | 0.4451 | | 0.307 | 34.04 | 4800 | 0.6445 | 0.4392 | | 0.2722 | 36.88 | 5200 | 0.6126 | 0.4292 | | 0.2247 | 39.71 | 5600 | 0.6370 | 0.4146 | ### Framework versions - Transformers 4.11.3 - Pytorch 1.10.0+cu113 - Datasets 1.18.3 - Tokenizers 0.10.3
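A minimal transcription sketch, assuming 16 kHz mono input audio (the usual wav2vec2 requirement); the repo id and audio file name below are placeholders, and ffmpeg must be available for the pipeline to decode audio files:

```python
# Sketch only: transcribe an Arabic audio file with the fine-tuned checkpoint.
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="your-username/wav2vec2-arabic-gpu-colab-similar-to-german-bigger-warm-up",  # hypothetical repo id
)
print(asr("sample_arabic_16khz.wav"))  # hypothetical local file
```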
Aurora/community.afpglobal
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-05-15T14:21:40Z
--- license: mit tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: bert-base-german-cased-finetuned-subj_preTrained_with_noisyData results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-german-cased-finetuned-subj_preTrained_with_noisyData This model is a fine-tuned version of [bert-base-german-cased](https://huggingface.co/bert-base-german-cased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.0192 - Precision: 0.9203 - Recall: 0.8703 - F1: 0.8946 - Accuracy: 0.9939 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 245 | 0.0258 | 0.9172 | 0.7990 | 0.8540 | 0.9918 | | No log | 2.0 | 490 | 0.0192 | 0.9203 | 0.8703 | 0.8946 | 0.9939 | ### Framework versions - Transformers 4.19.1 - Pytorch 1.11.0+cu113 - Datasets 2.2.1 - Tokenizers 0.12.1
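A minimal inference sketch; the seqeval-style precision/recall/F1 above suggest a token-classification head, but the card does not state the task explicitly, and the repo id below is a placeholder:

```python
# Sketch only: tag subjective spans in a German sentence, assuming a token-classification head.
from transformers import pipeline

tagger = pipeline(
    "token-classification",
    model="your-username/bert-base-german-cased-finetuned-subj_preTrained_with_noisyData",  # hypothetical repo id
    aggregation_strategy="simple",
)
print(tagger("Ich finde, dieser Film ist ein absolutes Meisterwerk."))
```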
Aviora/news2vec
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-05-15T14:28:21Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 278.51 +/- 23.01 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
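A minimal sketch that loads the agent and renders a few episodes; the repo id and filename below are placeholders, not values taken from this card:

```python
# Sketch only: watch the PPO agent attempt a landing.
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

checkpoint = load_from_hub(
    repo_id="your-username/ppo-LunarLander-v2",  # hypothetical repo id
    filename="ppo-LunarLander-v2.zip",           # hypothetical filename
)
model = PPO.load(checkpoint)

env = gym.make("LunarLander-v2")
obs = env.reset()
for _ in range(1000):
    action, _ = model.predict(obs, deterministic=True)
    obs, reward, done, info = env.step(action)
    env.render()
    if done:
        obs = env.reset()
env.close()
```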
Aybars/ModelOnWhole
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
2022-05-27T20:47:12Z
--- language: ar license: mit widget: - text: "توكلت في رزقي على الله خالقي وأيقنت أن الله لا شك رازقي." - text: "أي شخص يتوقف عن التعلم هو عجوز، سواء كان في العشرين أو الثمانين." - text: "الحياة رواية جميلة عليك قراءتها حتى النهاية، لا تتوقف أبدا عند سطر حزين قد تكون النهاية جميلة." ---
Aybars/XLM_Turkish
[ "pytorch", "xlm-roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "XLMRobertaForQuestionAnswering" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
2022-05-15T15:16:33Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - common_voice model-index: - name: wav2vec2-large-xlsr-53-tr-fine-tuning results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-large-xlsr-53-tr-fine-tuning-00 This model is a fine-tuned version of [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on the common_voice dataset. It achieves the following results on the evaluation set: - Loss: 0.3974 - Wer: 0.4784 ## Training Procedure ### Training Hyper-parameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 30 ### Training Results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 5.0376 | 4.21 | 400 | 2.4295 | 1.0 | | 0.8375 | 8.42 | 800 | 0.4717 | 0.6291 | | 0.3246 | 12.63 | 1200 | 0.4066 | 0.5528 | | 0.216 | 16.84 | 1600 | 0.4022 | 0.5149 | | 0.1664 | 21.05 | 2000 | 0.3972 | 0.5013 | | 0.1413 | 25.26 | 2400 | 0.3982 | 0.4894 | | 0.1197 | 29.47 | 2800 | 0.3974 | 0.4784 | ### Framework Versions - Transformers 4.19.1 - Pytorch 1.11.0+cu102 - Datasets 2.2.1 - Tokenizers 0.12.1
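A minimal greedy CTC decoding sketch, assuming the checkpoint ships a Wav2Vec2 processor and CTC head (as the XLSR-53 base suggests); the repo id and audio file below are placeholders:

```python
# Sketch only: transcribe a Turkish audio file by greedy CTC decoding.
import torch
import torchaudio
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

model_id = "your-username/wav2vec2-large-xlsr-53-tr-fine-tuning"  # hypothetical repo id
processor = Wav2Vec2Processor.from_pretrained(model_id)
model = Wav2Vec2ForCTC.from_pretrained(model_id)

waveform, sample_rate = torchaudio.load("sample_turkish.wav")  # hypothetical local file
waveform = torchaudio.functional.resample(waveform, sample_rate, 16_000)

inputs = processor(waveform.squeeze().numpy(), sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(inputs.input_values).logits
pred_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(pred_ids))
```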
Ayham/albert_bert_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
2022-05-15T15:18:15Z
--- language: - en tags: - NER - named entity recognition - RE - relation extraction - entity mention detection - EMD - coreference resolution license: apache-2.0 datasets: - Ontonotes - CoNLL04 --- # CoReNer ## Demo We released an online demo so you can easily play with the model. Check it out: [http://corener-demo.aiola-lab.com](http://corener-demo.aiola-lab.com). The demo uses the [aiola/roberta-base-corener](https://huggingface.co/aiola/roberta-base-corener) model. ## Model description A multi-task model for named-entity recognition, relation extraction, entity mention detection, and coreference resolution. We model NER as a span classification task and relation extraction as a multi-label classification of (NER) span tuples. Similarly, we model EMD as a span classification task and CR as a binary classification of (EMD) span tuples. To construct the CR clusters, we keep the top antecedent of each mention, then compute the connected components of the mentions' undirected graph. The model was trained to recognize: - Entity types: GPE, ORG, PERSON, DATE, NORP, CARDINAL, MONEY, PERCENT, WORK_OF_ART, ORDINAL, EVENT, LOC, TIME, FAC, QUANTITY, LAW, PRODUCT, LANGUAGE. - Relation types: Kill, Live_In, Located_In, OrgBased_In, Work_For. ## Usage example See additional details and usage examples at: https://github.com/aiola-lab/corener. ```python import json from transformers import AutoTokenizer from corener.models import Corener, ModelOutput from corener.data import MTLDataset from corener.utils.prediction import convert_model_output tokenizer = AutoTokenizer.from_pretrained("aiola/roberta-base-corener") model = Corener.from_pretrained("aiola/roberta-base-corener") model.eval() examples = [ "Apple Park is the corporate headquarters of Apple Inc., located in Cupertino, California, United States. It was opened to employees in April 2017, while construction was still underway, and superseded the original headquarters at 1 Infinite Loop, which opened in 1993." ] dataset = MTLDataset( types=model.config.types, tokenizer=tokenizer, train_mode=False, ) dataset.read_dataset(examples) example = dataset.get_example(0) # get first example output: ModelOutput = model( input_ids=example.encodings, context_masks=example.context_masks, entity_masks=example.entity_masks, entity_sizes=example.entity_sizes, entity_spans=example.entity_spans, entity_sample_masks=example.entity_sample_masks, inference=True, ) print(json.dumps(convert_model_output(output=output, batch=example, dataset=dataset), indent=2)) ```
Ayham/albert_gpt2_Full_summarization_cnndm
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
2022-05-15T15:23:27Z
--- license: apache-2.0 --- A tiny randomly-initialized [ViLT](https://arxiv.org/abs/2102.03334) used for unit tests in the Transformers VQA pipeline
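A sketch of the kind of call such a checkpoint is meant to exercise; the repo id below is a placeholder, and a randomly-initialized model returns meaningless answers:

```python
# Sketch only: run the visual-question-answering pipeline end to end.
import requests
from PIL import Image
from transformers import pipeline

vqa = pipeline("visual-question-answering", model="your-username/tiny-random-vilt")  # hypothetical repo id
image = Image.open(
    requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
)
print(vqa(image=image, question="How many cats are there?"))
```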
Ayham/albert_gpt2_summarization_xsum
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:xsum", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2022-05-15T15:28:32Z
--- language: en thumbnail: http://www.huggingtweets.com/dclblogger-loopifyyy/1652628765621/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1472740175130230784/L7Xcs7wJ_400x400.jpg&#39;)"> </div> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1480550067564163078/D90SnyUa_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI CYBORG 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Matty & Loopify 🧙‍♂️</div> <div style="text-align: center; font-size: 14px;">@dclblogger-loopifyyy</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Matty & Loopify 🧙‍♂️. | Data | Matty | Loopify 🧙‍♂️ | | --- | --- | --- | | Tweets downloaded | 3250 | 3250 | | Retweets | 62 | 117 | | Short tweets | 494 | 867 | | Tweets kept | 2694 | 2266 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/1pq5pxck/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @dclblogger-loopifyyy's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/as5uacn5) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/as5uacn5/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/dclblogger-loopifyyy') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
Ayham/bert_gpt2_summarization_cnndm
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
2022-05-15T15:57:14Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - metrics: - type: mean_reward value: 225.63 +/- 80.78 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **DQN** Agent playing **LunarLander-v2** This is a trained model of a **DQN** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Ayham/bert_gpt2_summarization_xsum
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:xsum", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
2022-05-15T16:26:24Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 274.72 +/- 15.58 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code
Ayham/bertgpt2_cnn
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- tags: - generated_from_trainer datasets: - mt_eng_vietnamese metrics: - bleu model-index: - name: t5vi-finetuned-en-to-vi results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: mt_eng_vietnamese type: mt_eng_vietnamese args: iwslt2015-en-vi metrics: - name: Bleu type: bleu value: 13.547 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5vi-finetuned-en-to-vi This model is a fine-tuned version of [imthanhlv/t5vi](https://huggingface.co/imthanhlv/t5vi) on the mt_eng_vietnamese dataset. It achieves the following results on the evaluation set: - Loss: 1.3827 - Bleu: 13.547 - Gen Len: 17.3719 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 20 - eval_batch_size: 20 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:| | 1.8026 | 1.0 | 6666 | 1.5907 | 10.9756 | 17.3231 | | 1.6217 | 2.0 | 13332 | 1.4635 | 12.375 | 17.3444 | | 1.5087 | 3.0 | 19998 | 1.4131 | 13.1828 | 17.3924 | | 1.4446 | 4.0 | 26664 | 1.3915 | 13.5217 | 17.3617 | | 1.4076 | 5.0 | 33330 | 1.3827 | 13.547 | 17.3719 | ### Framework versions - Transformers 4.19.1 - Pytorch 1.11.0+cu113 - Datasets 2.2.1 - Tokenizers 0.12.1
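A minimal inference sketch; the repo id below is a placeholder, and plain English input (no task prefix) is assumed, since the card does not say whether a prefix was used during fine-tuning:

```python
# Sketch only: translate an English sentence to Vietnamese with the fine-tuned checkpoint.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_id = "your-username/t5vi-finetuned-en-to-vi"  # hypothetical repo id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

inputs = tokenizer("The weather is nice today.", return_tensors="pt")
outputs = model.generate(**inputs, max_length=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```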
Ayham/distilbert_distilgpt2_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- language: ar license: mit widget: - text: "إن العيون التي في طرفها حور [شطر] قتلننا ثم لم يحيين قتلانا" - text: "إذا ما فعلت الخير ضوعف شرهم [شطر] وكل إناء بالذي فيه ينضح" - text: "واحر قلباه ممن قلبه شبم [شطر] ومن بجسمي وحالي عنده سقم" ---
Ayham/distilbert_gpt2_summarization_cnndm
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- language: es tags: - sagemaker - roberta-bne - TextClassification - SentimentAnalysis license: apache-2.0 datasets: - IMDbreviews_es metrics: - accuracy model-index: - name: roberta_bne_sentiment_analysis_es results: - task: name: Sentiment Analysis type: sentiment-analysis dataset: name: "IMDb Reviews in Spanish" type: IMDbreviews_es metrics: - name: Accuracy type: accuracy value: 0.9106666666666666 - name: F1 Score type: f1 value: 0.9090909090909091 - name: Precision type: precision value: 0.9063852813852814 - name: Recall type: recall value: 0.9118127381600436 widget: - text: "Se trata de una película interesante, con un solido argumento y un gran interpretación de su actor principal" --- # Model roberta_bne_sentiment_analysis_es ## **A fine-tuned model for sentiment analysis in Spanish** This model was trained using Amazon SageMaker and the new Hugging Face Deep Learning container. The base model is **RoBERTa-base-bne**, which is a RoBERTa base model pre-trained using the largest Spanish corpus known to date, with a total of 570GB. It was trained by the [National Library of Spain (Biblioteca Nacional de España)](http://www.bne.es/en/Inicio/index.html). **RoBERTa BNE Citation** Check out the paper for all the details: https://arxiv.org/abs/2107.07253 ``` @article{gutierrezfandino2022, author = {Asier Gutiérrez-Fandiño and Jordi Armengol-Estapé and Marc Pàmies and Joan Llop-Palao and Joaquin Silveira-Ocampo and Casimiro Pio Carrino and Carme Armentano-Oller and Carlos Rodriguez-Penagos and Aitor Gonzalez-Agirre and Marta Villegas}, title = {MarIA: Spanish Language Models}, journal = {Procesamiento del Lenguaje Natural}, volume = {68}, number = {0}, year = {2022}, issn = {1989-7553}, url = {http://journal.sepln.org/sepln/ojs/ojs/index.php/pln/article/view/6405}, pages = {39--60} } ``` ## Dataset The dataset is a collection of movie reviews in Spanish, about 50,000 reviews. The dataset is balanced and provides every review in English and in Spanish, along with the label in both languages. Sizes of datasets: - Train dataset: 42,500 - Validation dataset: 3,750 - Test dataset: 3,750 ## Intended uses & limitations This model is intended for sentiment analysis on Spanish corpora. It is fine-tuned specifically for movie reviews, but it can be applied to other kinds of reviews. ## Hyperparameters { "epochs": "4", "train_batch_size": "32", "eval_batch_size": "8", "fp16": "true", "learning_rate": "3e-05", "model_name": "\"PlanTL-GOB-ES/roberta-base-bne\"", "sagemaker_container_log_level": "20", "sagemaker_program": "\"train.py\"", } ## Evaluation results - Accuracy = 0.9106666666666666 - F1 Score = 0.9090909090909091 - Precision = 0.9063852813852814 - Recall = 0.9118127381600436 ## Test results ## Model in action ### Usage for Sentiment Analysis ```python import torch from transformers import AutoTokenizer, AutoModelForSequenceClassification tokenizer = AutoTokenizer.from_pretrained("edumunozsala/roberta_bne_sentiment_analysis_es") model = AutoModelForSequenceClassification.from_pretrained("edumunozsala/roberta_bne_sentiment_analysis_es") text ="Se trata de una película interesante, con un solido argumento y un gran interpretación de su actor principal" input_ids = torch.tensor(tokenizer.encode(text)).unsqueeze(0) outputs = model(input_ids) output = outputs.logits.argmax(1) ``` Created by [Eduardo Muñoz/@edumunozsala](https://github.com/edumunozsala)
Ayham/ernie_gpt2_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- language: ar license: mit widget: - text: "توكلت في رزقي على الله خالقي وأيقنت أن الله لا شك رازقي." - text: "أي شخص يتوقف عن التعلم هو عجوز، سواء كان في العشرين أو الثمانين." - text: "الحياة رواية جميلة عليك قراءتها حتى النهاية، لا تتوقف أبدا عند سطر حزين قد تكون النهاية جميلة." ---