modelId | lastModified | tags | pipeline_tag | files | publishedBy | downloads_last_month | library | modelCard |
---|---|---|---|---|---|---|---|---|
sshleifer/student_xsum_9_9 | 2021-06-14T10:16:45.000Z | [
"pytorch",
"jax",
"bart",
"seq2seq",
"transformers",
"text2text-generation"
]
| text2text-generation | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"tokenizer_config.json",
"vocab.json"
]
| sshleifer | 23 | transformers | |
sshleifer/t5-base-cnn | 2020-07-02T03:28:24.000Z | [
"pytorch",
"t5",
"lm-head",
"seq2seq",
"transformers",
"text2text-generation"
]
| text2text-generation | [
".gitattributes",
"colin_preds.txt",
"colin_targets.txt",
"config.json",
"pytorch_model.bin"
]
| sshleifer | 63 | transformers | |
sshleifer/t5-tinier-random | 2020-11-09T14:05:06.000Z | [
"pytorch",
"t5",
"seq2seq",
"transformers",
"text2text-generation"
]
| text2text-generation | [
".gitattributes",
"config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"spiece.model",
"tokenizer_config.json"
]
| sshleifer | 9 | transformers | |
sshleifer/tinier_bart | 2021-06-14T09:08:24.000Z | [
"pytorch",
"jax",
"bart",
"seq2seq",
"transformers",
"text2text-generation"
]
| text2text-generation | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"tokenizer_config.json",
"vocab.json"
]
| sshleifer | 5,717 | transformers | |
sshleifer/tiny-ctrl | 2020-05-13T23:21:48.000Z | [
"pytorch",
"tf",
"ctrl",
"lm-head",
"causal-lm",
"transformers",
"text-generation"
]
| text-generation | [
".gitattributes",
"config.json",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tf_model.h5",
"tokenizer_config.json",
"vocab.json"
]
| sshleifer | 44,902 | transformers | |
sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english | 2021-05-20T07:12:23.000Z | [
"pytorch",
"tf",
"jax",
"bert",
"token-classification",
"transformers"
]
| token-classification | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"pytorch_model.bin",
"special_tokens_map.json",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| sshleifer | 82,022 | transformers | |
sshleifer/tiny-distilbert-base-cased-distilled-squad | 2020-05-14T16:54:23.000Z | [
"pytorch",
"tf",
"distilbert",
"question-answering",
"transformers"
]
| question-answering | [
".gitattributes",
"config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| sshleifer | 48,649 | transformers | |
sshleifer/tiny-distilbert-base-cased | 2021-05-20T07:12:39.000Z | [
"pytorch",
"tf",
"jax",
"bert",
"token-classification",
"transformers"
]
| token-classification | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"pytorch_model.bin",
"special_tokens_map.json",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| sshleifer | 30,439 | transformers | |
sshleifer/tiny-distilbert-base-uncased-finetuned-sst-2-english | 2020-05-12T01:51:10.000Z | [
"pytorch",
"tf",
"distilbert",
"text-classification",
"transformers"
]
| text-classification | [
".gitattributes",
"config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| sshleifer | 61,337 | transformers | |
sshleifer/tiny-distilroberta-base | 2021-05-20T21:55:56.000Z | [
"pytorch",
"tf",
"jax",
"roberta",
"masked-lm",
"transformers",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tf_model.h5",
"tokenizer_config.json",
"vocab.json"
]
| sshleifer | 50,050 | transformers | |
sshleifer/tiny-gpt2 | 2021-05-23T12:55:11.000Z | [
"pytorch",
"tf",
"jax",
"gpt2",
"lm-head",
"causal-lm",
"transformers",
"text-generation"
]
| text-generation | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tf_model.h5",
"tokenizer_config.json",
"vocab.json"
]
| sshleifer | 37,592 | transformers | |
sshleifer/tiny-marian-en-de | 2020-06-25T02:27:15.000Z | [
"pytorch",
"marian",
"seq2seq",
"transformers",
"text2text-generation"
]
| text2text-generation | [
".gitattributes",
"config.json",
"pytorch_model.bin",
"source.spm",
"special_tokens_map.json",
"target.spm",
"tokenizer_config.json",
"vocab.json"
]
| sshleifer | 11 | transformers | |
sshleifer/tiny-mbart | 2020-06-25T02:23:32.000Z | [
"pytorch",
"mbart",
"seq2seq",
"transformers",
"text2text-generation"
]
| text2text-generation | [
".gitattributes",
"config.json",
"pytorch_model.bin",
"sentencepiece.bpe.model",
"special_tokens_map.json",
"tokenizer_config.json"
]
| sshleifer | 27,060 | transformers | |
sshleifer/tiny-xlnet-base-cased | 2020-05-08T15:35:32.000Z | [
"pytorch",
"xlnet",
"lm-head",
"causal-lm",
"transformers",
"text-generation"
]
| text-generation | [
".gitattributes",
"config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"spiece.model",
"tokenizer_config.json"
]
| sshleifer | 30 | transformers | |
sssanthosh107/Sample1 | 2021-02-23T06:16:55.000Z | []
| [
".gitattributes"
]
| sssanthosh107 | 0 | |||
sssanthosh107/sampl | 2021-02-23T06:51:21.000Z | []
| [
".gitattributes"
]
| sssanthosh107 | 0 | |||
ssun32/bert_base_nli_turkle | 2021-05-20T07:13:17.000Z | [
"pytorch",
"jax",
"bert",
"transformers"
]
| [
".gitattributes",
"config.json",
"flax_model.msgpack",
"pytorch_model.bin",
"sentence_bert_config.json",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.txt"
]
| ssun32 | 16 | transformers | ||
ssun32/bert_twitter_turkle | 2021-05-20T07:14:10.000Z | [
"pytorch",
"jax",
"bert",
"transformers"
]
| [
".gitattributes",
"config.json",
"flax_model.msgpack",
"pytorch_model.bin",
"sentence_bert_config.json",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.txt"
]
| ssun32 | 17 | transformers | ||
stanleychu2/roberta-fever | 2021-06-15T21:43:15.000Z | [
"pytorch",
"roberta",
"text-classification",
"transformers"
]
| text-classification | [
".gitattributes",
"config.json",
"merges.txt",
"optimizer.pt",
"pytorch_model.bin",
"rng_state.pth",
"scaler.pt",
"scheduler.pt",
"special_tokens_map.json",
"tokenizer.json",
"tokenizer_config.json",
"trainer_state.json",
"training_args.bin",
"vocab.json"
]
| stanleychu2 | 0 | transformers | |
stas/mt5-tiny-random | 2021-04-21T02:34:20.000Z | [
"pytorch",
"mt5",
"seq2seq",
"transformers",
"text2text-generation"
]
| text2text-generation | [
".gitattributes",
"README.md",
"config.json",
"mt5-make-tiny-model.py",
"pytorch_model.bin",
"special_tokens_map.json",
"spiece.model",
"tokenizer_config.json"
]
| stas | 162 | transformers | This is a tiny random mt5 model used for testing.
See `mt5-make-tiny-model.py` for how it was created (see also the sketch below). |
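A hedged sketch of the general recipe; the authoritative version is the repo's `mt5-make-tiny-model.py`, and every dimension below is an illustrative assumption:
```python
# Hypothetical sketch only; the real recipe is mt5-make-tiny-model.py in the repo.
# Every size below is an illustrative assumption, chosen only to keep the model tiny.
from transformers import AutoTokenizer, MT5Config, MT5ForConditionalGeneration

config = MT5Config(
    d_model=64,            # tiny hidden size
    d_ff=256,              # tiny feed-forward size
    d_kv=32,
    num_layers=2,          # encoder layers
    num_decoder_layers=2,
    num_heads=2,
)
model = MT5ForConditionalGeneration(config)  # weights are randomly initialized

# Reuse a real tokenizer so the checkpoint is loadable end to end.
tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

model.save_pretrained("mt5-tiny-random")
tokenizer.save_pretrained("mt5-tiny-random")
```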
stas/t5-very-small-random | 2021-04-21T02:34:01.000Z | [
"pytorch",
"t5",
"seq2seq",
"transformers",
"text2text-generation"
]
| text2text-generation | [
".gitattributes",
"README.md",
"config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"spiece.model",
"t5-make-very-small-model.py",
"tokenizer.json",
"tokenizer_config.json",
"unigram.json"
]
| stas | 7 | transformers | This is a tiny random t5 model used for testing.
See `t5-make-very-small-model.py` for how it was created (a usage sketch follows below). |
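Tiny random checkpoints like this are typically used as fast stand-ins in unit tests. A minimal sketch; the generated text is meaningless by design, only speed and tensor shapes matter:
```python
# Sketch: using the tiny random checkpoint as a fast stand-in in a test.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("stas/t5-very-small-random")
model = AutoModelForSeq2SeqLM.from_pretrained("stas/t5-very-small-random")

inputs = tokenizer("translate English to German: hello", return_tensors="pt")
output_ids = model.generate(**inputs, max_length=10)
print(tokenizer.batch_decode(output_ids))  # gibberish by design: the weights are random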
stas/tiny-wmt19-en-de | 2021-05-03T01:48:44.000Z | [
"pytorch",
"fsmt",
"seq2seq",
"en",
"de",
"dataset:wmt19",
"transformers",
"wmt19",
"testing",
"license:apache-2.0",
"text2text-generation"
]
| text2text-generation | [
".gitattributes",
"README.md",
"config.json",
"fsmt-make-tiny-model.py",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab-src.json",
"vocab-tgt.json"
]
| stas | 85 | transformers | ---
language:
- en
- de
thumbnail:
tags:
- wmt19
- testing
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# Tiny FSMT en-de
This is a tiny model that is used in the `transformers` test suite. It doesn't do anything useful, other than testing that `modeling_fsmt.py` is functional.
Do not try to use it for anything that requires quality.
The model is indeed 1MB in size.
You can see how it was created [here](https://huggingface.co/stas/tiny-wmt19-en-de/blob/main/fsmt-make-tiny-model.py).
If you're looking for the real model, please go to [https://huggingface.co/facebook/wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de).
|
stas/tiny-wmt19-en-ru | 2021-05-03T01:47:47.000Z | [
"pytorch",
"fsmt",
"seq2seq",
"en",
"ru",
"dataset:wmt19",
"transformers",
"wmt19",
"testing",
"license:apache-2.0",
"text2text-generation"
]
| text2text-generation | [
".gitattributes",
"README.md",
"config.json",
"fsmt-make-super-tiny-model.py",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab-src.json",
"vocab-tgt.json"
]
| stas | 32 | transformers | ---
language:
- en
- ru
thumbnail:
tags:
- wmt19
- testing
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# Tiny FSMT en-ru
This is a tiny model that is used in the `transformers` test suite. It doesn't do anything useful, other than testing that `modeling_fsmt.py` is functional.
Do not try to use it for anything that requires quality.
The model is indeed 30KB in size.
You can see how it was created [here](https://huggingface.co/stas/tiny-wmt19-en-ru/blob/main/fsmt-make-super-tiny-model.py).
If you're looking for the real model, please go to [https://huggingface.co/facebook/wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru).
|
stefan-it/bort-full | 2020-12-16T13:06:42.000Z | [
"pytorch",
"bort",
"masked-lm",
"transformers",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"config.json",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.json"
]
| stefan-it | 23 | transformers | |
stefan-it/bort | 2021-05-20T07:14:56.000Z | [
"pytorch",
"tf",
"jax",
"bert",
"masked-lm",
"transformers",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tf_model.h5",
"tokenizer_config.json",
"vocab.json"
]
| stefan-it | 13 | transformers | |
stefan-it/electra-base-gc4-64k-0-cased-discriminator | 2021-04-30T22:16:19.000Z | [
"pytorch",
"tf",
"electra",
"pretraining",
"de",
"dataset:german-nlp-group/german_common_crawl",
"transformers",
"license:mit"
]
| [
".gitattributes",
"README.md",
"config.json",
"model.ckpt-0.data-00000-of-00001",
"model.ckpt-0.index",
"model.ckpt-0.meta",
"pytorch_model.bin",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| stefan-it | 6 | transformers | ---
language: de
license: mit
datasets:
- german-nlp-group/german_common_crawl
---
# GC4LM: A Colossal (Biased) language model for German
This repository presents a colossal (and biased) language model for German trained on the recently released
["German colossal, clean Common Crawl corpus"](https://german-nlp-group.github.io/projects/gc4-corpus.html) (GC4),
with a total dataset size of ~844GB.
---
**Disclaimer**: the presented and trained language models in this repository are for **research only** purposes.
The GC4 corpus - that was used for training - contains crawled texts from the internet. Thus, the language models can
be considered as highly biased, resulting in a model that encodes stereotypical associations along gender, race,
ethnicity and disability status. Before using and working with the released checkpoints, it is highly recommended
to read:
[On the Dangers of Stochastic Parrots: Can Language Models Be Too Big?](https://faculty.washington.edu/ebender/papers/Stochastic_Parrots.pdf)
from Emily M. Bender, Timnit Gebru, Angelina McMillan-Major and Shmargaret Shmitchell.
The aim of the released checkpoints is to boost research on large pre-trained language models for German, especially
for identifying biases and how to prevent them, as most research is currently done only for English.
---
Please use the new GitHub Discussions feature in order to discuss or present further research questions.
Feel free to use `#gc4lm` on Twitter 🐦. |
|
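The discriminator checkpoints carry no pipeline tag. Assuming they follow the standard ELECTRA setup in `transformers` (an assumption; the card itself shows no usage code), replaced-token detection could be probed roughly like this:
```python
# Sketch under the assumption that this checkpoint uses the standard ELECTRA
# discriminator head; this usage is not taken from the model card itself.
import torch
from transformers import ElectraForPreTraining, ElectraTokenizerFast

name = "stefan-it/electra-base-gc4-64k-0-cased-discriminator"
tokenizer = ElectraTokenizerFast.from_pretrained(name)
model = ElectraForPreTraining.from_pretrained(name)

inputs = tokenizer("Heute ist ein schöner Tag", return_tensors="pt")
logits = model(**inputs).logits     # one score per token
print(torch.sigmoid(logits) > 0.5)  # True where a token looks replaced
```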
stefan-it/electra-base-gc4-64k-0-cased-generator | 2021-04-30T22:25:17.000Z | [
"pytorch",
"tf",
"electra",
"masked-lm",
"de",
"dataset:german-nlp-group/german_common_crawl",
"transformers",
"license:mit",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"README.md",
"config.json",
"model.ckpt-0.data-00000-of-00001",
"model.ckpt-0.index",
"model.ckpt-0.meta",
"pytorch_model.bin",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| stefan-it | 6 | transformers | ---
language: de
license: mit
datasets:
- german-nlp-group/german_common_crawl
widget:
- text: "Heute ist ein [MASK] Tag"
---
# GC4LM: A Colossal (Biased) language model for German
This repository presents a colossal (and biased) language model for German trained on the recently released
["German colossal, clean Common Crawl corpus"](https://german-nlp-group.github.io/projects/gc4-corpus.html) (GC4),
with a total dataset size of ~844GB.
---
**Disclaimer**: the presented and trained language models in this repository are for **research only** purposes.
The GC4 corpus - that was used for training - contains crawled texts from the internet. Thus, the language models can
be considered as highly biased, resulting in a model that encodes stereotypical associations along gender, race,
ethnicity and disability status. Before using and working with the released checkpoints, it is highly recommended
to read:
[On the Dangers of Stochastic Parrots: Can Language Models Be Too Big?](https://faculty.washington.edu/ebender/papers/Stochastic_Parrots.pdf)
from Emily M. Bender, Timnit Gebru, Angelina McMillan-Major and Shmargaret Shmitchell.
The aim of the released checkpoints is to boost research on large pre-trained language models for German, especially
for identifying biases and how to prevent them, as most research is currently done only for English.
---
Please use the new GitHub Discussions feature in order to discuss or present further research questions.
Feel free to use `#gc4lm` on Twitter 🐦.
|
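The generator checkpoints are tagged `fill-mask` and ship a widget example ("Heute ist ein [MASK] Tag"), so they should be queryable with the standard pipeline; a minimal sketch:
```python
# Sketch: a fill-mask query mirroring the widget example from the model card.
from transformers import pipeline

fill_mask = pipeline(
    "fill-mask",
    model="stefan-it/electra-base-gc4-64k-0-cased-generator",
)
for prediction in fill_mask("Heute ist ein [MASK] Tag"):
    print(f"{prediction['token_str']:>12}  {prediction['score']:.3f}")
```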
stefan-it/electra-base-gc4-64k-100000-cased-discriminator | 2021-04-30T22:33:21.000Z | [
"pytorch",
"tf",
"electra",
"pretraining",
"de",
"dataset:german-nlp-group/german_common_crawl",
"transformers",
"license:mit"
]
| [
".gitattributes",
"README.md",
"config.json",
"model.ckpt-100000.data-00000-of-00001",
"model.ckpt-100000.index",
"model.ckpt-100000.meta",
"pytorch_model.bin",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| stefan-it | 11 | transformers | ---
language: de
license: mit
datasets:
- german-nlp-group/german_common_crawl
---
# GC4LM: A Colossal (Biased) language model for German
This repository presents a colossal (and biased) language model for German trained on the recently released
["German colossal, clean Common Crawl corpus"](https://german-nlp-group.github.io/projects/gc4-corpus.html) (GC4),
with a total dataset size of ~844GB.
---
**Disclaimer**: the presented and trained language models in this repository are for **research only** purposes.
The GC4 corpus - that was used for training - contains crawled texts from the internet. Thus, the language models can
be considered as highly biased, resulting in a model that encodes stereotypical associations along gender, race,
ethnicity and disability status. Before using and working with the released checkpoints, it is highly recommended
to read:
[On the Dangers of Stochastic Parrots: Can Language Models Be Too Big?](https://faculty.washington.edu/ebender/papers/Stochastic_Parrots.pdf)
from Emily M. Bender, Timnit Gebru, Angelina McMillan-Major and Shmargaret Shmitchell.
The aim of the released checkpoints is to boost research on large pre-trained language models for German, especially
for identifying biases and how to prevent them, as most research is currently done only for English.
---
Please use the new GitHub Discussions feature in order to discuss or present further research questions.
Feel free to use `#gc4lm` on Twitter 🐦.
|
|
stefan-it/electra-base-gc4-64k-100000-cased-generator | 2021-05-01T11:16:57.000Z | [
"pytorch",
"tf",
"electra",
"masked-lm",
"de",
"dataset:german-nlp-group/german_common_crawl",
"transformers",
"license:mit",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"README.md",
"config.json",
"model.ckpt-100000.data-00000-of-00001",
"model.ckpt-100000.index",
"model.ckpt-100000.meta",
"pytorch_model.bin",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| stefan-it | 6 | transformers | ---
language: de
license: mit
datasets:
- german-nlp-group/german_common_crawl
widget:
- text: "Heute ist ein [MASK] Tag"
---
# GC4LM: A Colossal (Biased) language model for German
This repository presents a colossal (and biased) language model for German trained on the recently released
["German colossal, clean Common Crawl corpus"](https://german-nlp-group.github.io/projects/gc4-corpus.html) (GC4),
with a total dataset size of ~844GB.
---
**Disclaimer**: the presented and trained language models in this repository are for **research only** purposes.
The GC4 corpus - that was used for training - contains crawled texts from the internet. Thus, the language models can
be considered as highly biased, resulting in a model that encodes stereotypical associations along gender, race,
ethnicity and disability status. Before using and working with the released checkpoints, it is highly recommended
to read:
[On the Dangers of Stochastic Parrots: Can Language Models Be Too Big?](https://faculty.washington.edu/ebender/papers/Stochastic_Parrots.pdf)
from Emily M. Bender, Timnit Gebru, Angelina McMillan-Major and Shmargaret Shmitchell.
The aim of the released checkpoints is to boost research on large pre-trained language models for German, especially
for identifying biases and how to prevent them, as most research is currently done only for English.
---
Please use the new GitHub Discussions feature in order to discuss or present further research questions.
Feel free to use `#gc4lm` on Twitter 🐦.
|
stefan-it/electra-base-gc4-64k-1000000-cased-discriminator | 2021-05-01T11:13:39.000Z | [
"pytorch",
"tf",
"electra",
"pretraining",
"de",
"dataset:german-nlp-group/german_common_crawl",
"transformers",
"license:mit"
]
| [
".gitattributes",
"README.md",
"config.json",
"model.ckpt-1000000.data-00000-of-00001",
"model.ckpt-1000000.index",
"model.ckpt-1000000.meta",
"pytorch_model.bin",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| stefan-it | 6 | transformers | ---
language: de
license: mit
datasets:
- german-nlp-group/german_common_crawl
---
# GC4LM: A Colossal (Biased) language model for German
This repository presents a colossal (and biased) language model for German trained on the recently released
["German colossal, clean Common Crawl corpus"](https://german-nlp-group.github.io/projects/gc4-corpus.html) (GC4),
with a total dataset size of ~844GB.
---
**Disclaimer**: the presented and trained language models in this repository are for **research only** purposes.
The GC4 corpus - that was used for training - contains crawled texts from the internet. Thus, the language models can
be considered as highly biased, resulting in a model that encodes stereotypical associations along gender, race,
ethnicity and disability status. Before using and working with the released checkpoints, it is highly recommended
to read:
[On the Dangers of Stochastic Parrots: Can Language Models Be Too Big?](https://faculty.washington.edu/ebender/papers/Stochastic_Parrots.pdf)
from Emily M. Bender, Timnit Gebru, Angelina McMillan-Major and Shmargaret Shmitchell.
The aim of the released checkpoints is to boost research on large pre-trained language models for German, especially
for identifying biases and how to prevent them, as most research is currently done only for English.
---
Please use the new GitHub Discussions feature in order to discuss or present further research questions.
Feel free to use `#gc4lm` on Twitter 🐦.
|
|
stefan-it/electra-base-gc4-64k-1000000-cased-generator | 2021-05-01T11:24:59.000Z | [
"pytorch",
"tf",
"electra",
"masked-lm",
"de",
"dataset:german-nlp-group/german_common_crawl",
"transformers",
"license:mit",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"README.md",
"config.json",
"model.ckpt-1000000.data-00000-of-00001",
"model.ckpt-1000000.index",
"model.ckpt-1000000.meta",
"pytorch_model.bin",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| stefan-it | 43 | transformers | ---
language: de
license: mit
datasets:
- german-nlp-group/german_common_crawl
widget:
- text: "Heute ist ein [MASK] Tag"
---
# GC4LM: A Colossal (Biased) language model for German
This repository presents a colossal (and biased) language model for German trained on the recently released
["German colossal, clean Common Crawl corpus"](https://german-nlp-group.github.io/projects/gc4-corpus.html) (GC4),
with a total dataset size of ~844GB.
---
**Disclaimer**: the presented and trained language models in this repository are for **research only** purposes.
The GC4 corpus - that was used for training - contains crawled texts from the internet. Thus, the language models can
be considered as highly biased, resulting in a model that encodes stereotypical associations along gender, race,
ethnicity and disability status. Before using and working with the released checkpoints, it is highly recommended
to read:
[On the Dangers of Stochastic Parrots: Can Language Models Be Too Big?](https://faculty.washington.edu/ebender/papers/Stochastic_Parrots.pdf)
from Emily M. Bender, Timnit Gebru, Angelina McMillan-Major and Shmargaret Shmitchell.
The aim of the released checkpoints is to boost research on large pre-trained language models for German, especially
for identifying biases and how to prevent them, as most research is currently done only for English.
---
Please use the new GitHub Discussions feature in order to discuss or present further research questions.
Feel free to use `#gc4lm` on Twitter 🐦.
|
stefan-it/electra-base-gc4-64k-200000-cased-discriminator | 2021-04-30T22:36:06.000Z | [
"pytorch",
"tf",
"electra",
"pretraining",
"de",
"dataset:german-nlp-group/german_common_crawl",
"transformers",
"license:mit"
]
| [
".gitattributes",
"README.md",
"config.json",
"model.ckpt-200000.data-00000-of-00001",
"model.ckpt-200000.index",
"model.ckpt-200000.meta",
"pytorch_model.bin",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| stefan-it | 12 | transformers | ---
language: de
license: mit
datasets:
- german-nlp-group/german_common_crawl
---
# GC4LM: A Colossal (Biased) language model for German
This repository presents a colossal (and biased) language model for German trained on the recently released
["German colossal, clean Common Crawl corpus"](https://german-nlp-group.github.io/projects/gc4-corpus.html) (GC4),
with a total dataset size of ~844GB.
---
**Disclaimer**: the presented and trained language models in this repository are for **research only** purposes.
The GC4 corpus - that was used for training - contains crawled texts from the internet. Thus, the language models can
be considered as highly biased, resulting in a model that encodes stereotypical associations along gender, race,
ethnicity and disability status. Before using and working with the released checkpoints, it is highly recommended
to read:
[On the Dangers of Stochastic Parrots: Can Language Models Be Too Big?](https://faculty.washington.edu/ebender/papers/Stochastic_Parrots.pdf)
from Emily M. Bender, Timnit Gebru, Angelina McMillan-Major and Shmargaret Shmitchell.
The aim of the released checkpoints is to boost research on large pre-trained language models for German, especially
for identifying biases and how to prevent them, as most research is currently done only for English.
---
Please use the new GitHub Discussions feature in order to discuss or present further research questions.
Feel free to use `#gc4lm` on Twitter 🐦.
|
|
stefan-it/electra-base-gc4-64k-200000-cased-generator | 2021-05-01T11:17:26.000Z | [
"pytorch",
"tf",
"electra",
"masked-lm",
"de",
"dataset:german-nlp-group/german_common_crawl",
"transformers",
"license:mit",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"README.md",
"config.json",
"model.ckpt-200000.data-00000-of-00001",
"model.ckpt-200000.index",
"model.ckpt-200000.meta",
"pytorch_model.bin",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| stefan-it | 9 | transformers | ---
language: de
license: mit
datasets:
- german-nlp-group/german_common_crawl
widget:
- text: "Heute ist ein [MASK] Tag"
---
# GC4LM: A Colossal (Biased) language model for German
This repository presents a colossal (and biased) language model for German trained on the recently released
["German colossal, clean Common Crawl corpus"](https://german-nlp-group.github.io/projects/gc4-corpus.html) (GC4),
with a total dataset size of ~844GB.
---
**Disclaimer**: the presented and trained language models in this repository are for **research only** purposes.
The GC4 corpus - that was used for training - contains crawled texts from the internet. Thus, the language models can
be considered as highly biased, resulting in a model that encodes stereotypical associations along gender, race,
ethnicity and disability status. Before using and working with the released checkpoints, it is highly recommended
to read:
[On the Dangers of Stochastic Parrots: Can Language Models Be Too Big?](https://faculty.washington.edu/ebender/papers/Stochastic_Parrots.pdf)
from Emily M. Bender, Timnit Gebru, Angelina McMillan-Major and Shmargaret Shmitchell.
The aim of the released checkpoints is to boost research on large pre-trained language models for German, especially
for identifying biases and how to prevent them, as most research is currently done only for English.
---
Please use the new GitHub Discussions feature in order to discuss or present further research questions.
Feel free to use `#gc4lm` on Twitter 🐦.
|
stefan-it/electra-base-gc4-64k-300000-cased-discriminator | 2021-04-30T22:38:04.000Z | [
"pytorch",
"tf",
"electra",
"pretraining",
"de",
"dataset:german-nlp-group/german_common_crawl",
"transformers",
"license:mit"
]
| [
".gitattributes",
"README.md",
"config.json",
"model.ckpt-300000.data-00000-of-00001",
"model.ckpt-300000.index",
"model.ckpt-300000.meta",
"pytorch_model.bin",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| stefan-it | 9 | transformers | ---
language: de
license: mit
datasets:
- german-nlp-group/german_common_crawl
---
# GC4LM: A Colossal (Biased) language model for German
This repository presents a colossal (and biased) language model for German trained on the recently released
["German colossal, clean Common Crawl corpus"](https://german-nlp-group.github.io/projects/gc4-corpus.html) (GC4),
with a total dataset size of ~844GB.
---
**Disclaimer**: the presented and trained language models in this repository are for **research only** purposes.
The GC4 corpus - that was used for training - contains crawled texts from the internet. Thus, the language models can
be considered as highly biased, resulting in a model that encodes stereotypical associations along gender, race,
ethnicity and disability status. Before using and working with the released checkpoints, it is highly recommended
to read:
[On the Dangers of Stochastic Parrots: Can Language Models Be Too Big?](https://faculty.washington.edu/ebender/papers/Stochastic_Parrots.pdf)
from Emily M. Bender, Timnit Gebru, Angelina McMillan-Major and Shmargaret Shmitchell.
The aim of the released checkpoints is to boost research on large pre-trained language models for German, especially
for identifying biases and how to prevent them, as most research is currently done only for English.
---
Please use the new GitHub Discussions feature in order to discuss or present further research questions.
Feel free to use `#gc4lm` on Twitter 🐦.
|
|
stefan-it/electra-base-gc4-64k-300000-cased-generator | 2021-05-01T11:18:30.000Z | [
"pytorch",
"tf",
"electra",
"masked-lm",
"de",
"dataset:german-nlp-group/german_common_crawl",
"transformers",
"license:mit",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"README.md",
"config.json",
"model.ckpt-300000.data-00000-of-00001",
"model.ckpt-300000.index",
"model.ckpt-300000.meta",
"pytorch_model.bin",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| stefan-it | 6 | transformers | ---
language: de
license: mit
datasets:
- german-nlp-group/german_common_crawl
widget:
- text: "Heute ist ein [MASK] Tag"
---
# GC4LM: A Colossal (Biased) language model for German
This repository presents a colossal (and biased) language model for German trained on the recently released
["German colossal, clean Common Crawl corpus"](https://german-nlp-group.github.io/projects/gc4-corpus.html) (GC4),
with a total dataset size of ~844GB.
---
**Disclaimer**: the presented and trained language models in this repository are for **research only** purposes.
The GC4 corpus - that was used for training - contains crawled texts from the internet. Thus, the language models can
be considered as highly biased, resulting in a model that encodes stereotypical associations along gender, race,
ethnicity and disability status. Before using and working with the released checkpoints, it is highly recommended
to read:
[On the Dangers of Stochastic Parrots: Can Language Models Be Too Big?](https://faculty.washington.edu/ebender/papers/Stochastic_Parrots.pdf)
from Emily M. Bender, Timnit Gebru, Angelina McMillan-Major and Shmargaret Shmitchell.
The aim of the released checkpoints is to boost research on large pre-trained language models for German, especially
for identifying biases and how to prevent them, as most research is currently done only for English.
---
Please use the new GitHub Discussions feature in order to discuss or present further research questions.
Feel free to use `#gc4lm` on Twitter 🐦.
|
stefan-it/electra-base-gc4-64k-400000-cased-discriminator | 2021-04-30T22:41:07.000Z | [
"pytorch",
"tf",
"electra",
"pretraining",
"de",
"dataset:german-nlp-group/german_common_crawl",
"transformers",
"license:mit"
]
| [
".gitattributes",
"README.md",
"config.json",
"model.ckpt-400000.data-00000-of-00001",
"model.ckpt-400000.index",
"model.ckpt-400000.meta",
"pytorch_model.bin",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| stefan-it | 6 | transformers | ---
language: de
license: mit
datasets:
- german-nlp-group/german_common_crawl
---
# GC4LM: A Colossal (Biased) language model for German
This repository presents a colossal (and biased) language model for German trained on the recently released
["German colossal, clean Common Crawl corpus"](https://german-nlp-group.github.io/projects/gc4-corpus.html) (GC4),
with a total dataset size of ~844GB.
---
**Disclaimer**: the presented and trained language models in this repository are for **research only** purposes.
The GC4 corpus - that was used for training - contains crawled texts from the internet. Thus, the language models can
be considered as highly biased, resulting in a model that encodes stereotypical associations along gender, race,
ethnicity and disability status. Before using and working with the released checkpoints, it is highly recommended
to read:
[On the Dangers of Stochastic Parrots: Can Language Models Be Too Big?](https://faculty.washington.edu/ebender/papers/Stochastic_Parrots.pdf)
from Emily M. Bender, Timnit Gebru, Angelina McMillan-Major and Shmargaret Shmitchell.
The aim of the released checkpoints is to boost research on large pre-trained language models for German, especially
for identifying biases and how to prevent them, as most research is currently done only for English.
---
Please use the new GitHub Discussions feature in order to discuss or present further research questions.
Feel free to use `#gc4lm` on Twitter 🐦.
|
|
stefan-it/electra-base-gc4-64k-400000-cased-generator | 2021-05-01T11:19:45.000Z | [
"pytorch",
"tf",
"electra",
"masked-lm",
"de",
"dataset:german-nlp-group/german_common_crawl",
"transformers",
"license:mit",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"README.md",
"config.json",
"model.ckpt-400000.data-00000-of-00001",
"model.ckpt-400000.index",
"model.ckpt-400000.meta",
"pytorch_model.bin",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| stefan-it | 11 | transformers | ---
language: de
license: mit
datasets:
- german-nlp-group/german_common_crawl
widget:
- text: "Heute ist ein [MASK] Tag"
---
# GC4LM: A Colossal (Biased) language model for German
This repository presents a colossal (and biased) language model for German trained on the recently released
["German colossal, clean Common Crawl corpus"](https://german-nlp-group.github.io/projects/gc4-corpus.html) (GC4),
with a total dataset size of ~844GB.
---
**Disclaimer**: the presented and trained language models in this repository are for **research only** purposes.
The GC4 corpus - that was used for training - contains crawled texts from the internet. Thus, the language models can
be considered as highly biased, resulting in a model that encodes stereotypical associations along gender, race,
ethnicity and disability status. Before using and working with the released checkpoints, it is highly recommended
to read:
[On the Dangers of Stochastic Parrots: Can Language Models Be Too Big?](https://faculty.washington.edu/ebender/papers/Stochastic_Parrots.pdf)
from Emily M. Bender, Timnit Gebru, Angelina McMillan-Major and Shmargaret Shmitchell.
The aim of the released checkpoints is to boost research on large pre-trained language models for German, especially
for identifying biases and how to prevent them, as most research is currently done only for English.
---
Please use the new GitHub Discussions feature in order to discuss or present further research questions.
Feel free to use `#gc4lm` on Twitter 🐦.
|
stefan-it/electra-base-gc4-64k-500000-cased-discriminator | 2021-05-01T07:47:50.000Z | [
"pytorch",
"tf",
"electra",
"pretraining",
"de",
"dataset:german-nlp-group/german_common_crawl",
"transformers",
"license:mit"
]
| [
".gitattributes",
"README.md",
"config.json",
"model.ckpt-500000.data-00000-of-00001",
"model.ckpt-500000.index",
"model.ckpt-500000.meta",
"pytorch_model.bin",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| stefan-it | 6 | transformers | ---
language: de
license: mit
datasets:
- german-nlp-group/german_common_crawl
---
# GC4LM: A Colossal (Biased) language model for German
This repository presents a colossal (and biased) language model for German trained on the recently released
["German colossal, clean Common Crawl corpus"](https://german-nlp-group.github.io/projects/gc4-corpus.html) (GC4),
with a total dataset size of ~844GB.
---
**Disclaimer**: the presented and trained language models in this repository are for **research only** purposes.
The GC4 corpus - that was used for training - contains crawled texts from the internet. Thus, the language models can
be considered as highly biased, resulting in a model that encodes stereotypical associations along gender, race,
ethnicity and disability status. Before using and working with the released checkpoints, it is highly recommended
to read:
[On the Dangers of Stochastic Parrots: Can Language Models Be Too Big?](https://faculty.washington.edu/ebender/papers/Stochastic_Parrots.pdf)
from Emily M. Bender, Timnit Gebru, Angelina McMillan-Major and Shmargaret Shmitchell.
The aim of the released checkpoints is to boost research on large pre-trained language models for German, especially
for identifying biases and how to prevent them, as most research is currently done only for English.
---
Please use the new GitHub Discussions feature in order to discuss or present further research questions.
Feel free to use `#gc4lm` on Twitter 🐦.
|
|
stefan-it/electra-base-gc4-64k-500000-cased-generator | 2021-05-01T11:20:11.000Z | [
"pytorch",
"tf",
"electra",
"masked-lm",
"de",
"dataset:german-nlp-group/german_common_crawl",
"transformers",
"license:mit",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"README.md",
"config.json",
"model.ckpt-500000.data-00000-of-00001",
"model.ckpt-500000.index",
"model.ckpt-500000.meta",
"pytorch_model.bin",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| stefan-it | 11 | transformers | ---
language: de
license: mit
datasets:
- german-nlp-group/german_common_crawl
widget:
- text: "Heute ist ein [MASK] Tag"
---
# GC4LM: A Colossal (Biased) language model for German
This repository presents a colossal (and biased) language model for German trained on the recently released
["German colossal, clean Common Crawl corpus"](https://german-nlp-group.github.io/projects/gc4-corpus.html) (GC4),
with a total dataset size of ~844GB.
---
**Disclaimer**: the presented and trained language models in this repository are for **research only** purposes.
The GC4 corpus - that was used for training - contains crawled texts from the internet. Thus, the language models can
be considered as highly biased, resulting in a model that encodes stereotypical associations along gender, race,
ethnicity and disability status. Before using and working with the released checkpoints, it is highly recommended
to read:
[On the Dangers of Stochastic Parrots: Can Language Models Be Too Big?](https://faculty.washington.edu/ebender/papers/Stochastic_Parrots.pdf)
from Emily M. Bender, Timnit Gebru, Angelina McMillan-Major and Shmargaret Shmitchell.
The aim of the released checkpoints is to boost research on large pre-trained language models for German, especially
for identifying biases and how to prevent them, as most research is currently done only for English.
---
Please use the new GitHub Discussions feature in order to discuss or present further research questions.
Feel free to use `#gc4lm` on Twitter 🐦.
|
stefan-it/electra-base-gc4-64k-600000-cased-discriminator | 2021-05-01T07:52:54.000Z | [
"pytorch",
"tf",
"electra",
"pretraining",
"de",
"dataset:german-nlp-group/german_common_crawl",
"transformers",
"license:mit"
]
| [
".gitattributes",
"README.md",
"config.json",
"model.ckpt-600000.data-00000-of-00001",
"model.ckpt-600000.index",
"model.ckpt-600000.meta",
"pytorch_model.bin",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| stefan-it | 9 | transformers | ---
language: de
license: mit
datasets:
- german-nlp-group/german_common_crawl
---
# GC4LM: A Colossal (Biased) language model for German
This repository presents a colossal (and biased) language model for German trained on the recently released
["German colossal, clean Common Crawl corpus"](https://german-nlp-group.github.io/projects/gc4-corpus.html) (GC4),
with a total dataset size of ~844GB.
---
**Disclaimer**: the presented and trained language models in this repository are for **research only** purposes.
The GC4 corpus - that was used for training - contains crawled texts from the internet. Thus, the language models can
be considered as highly biased, resulting in a model that encodes stereotypical associations along gender, race,
ethnicity and disability status. Before using and working with the released checkpoints, it is highly recommended
to read:
[On the Dangers of Stochastic Parrots: Can Language Models Be Too Big?](https://faculty.washington.edu/ebender/papers/Stochastic_Parrots.pdf)
from Emily M. Bender, Timnit Gebru, Angelina McMillan-Major and Shmargaret Shmitchell.
The aim of the released checkpoints is to boost research on large pre-trained language models for German, especially
for identifying biases and how to prevent them, as most research is currently done only for English.
---
Please use the new GitHub Discussions feature in order to discuss or present further research questions.
Feel free to use `#gc4lm` on Twitter 🐦.
|
|
stefan-it/electra-base-gc4-64k-600000-cased-generator | 2021-05-01T11:21:31.000Z | [
"pytorch",
"tf",
"electra",
"masked-lm",
"de",
"dataset:german-nlp-group/german_common_crawl",
"transformers",
"license:mit",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"README.md",
"config.json",
"model.ckpt-600000.data-00000-of-00001",
"model.ckpt-600000.index",
"model.ckpt-600000.meta",
"pytorch_model.bin",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| stefan-it | 11 | transformers | ---
language: de
license: mit
datasets:
- german-nlp-group/german_common_crawl
widget:
- text: "Heute ist ein [MASK] Tag"
---
# GC4LM: A Colossal (Biased) language model for German
This repository presents a colossal (and biased) language model for German trained on the recently released
["German colossal, clean Common Crawl corpus"](https://german-nlp-group.github.io/projects/gc4-corpus.html) (GC4),
with a total dataset size of ~844GB.
---
**Disclaimer**: the presented and trained language models in this repository are for **research only** purposes.
The GC4 corpus - that was used for training - contains crawled texts from the internet. Thus, the language models can
be considered as highly biased, resulting in a model that encodes stereotypical associations along gender, race,
ethnicity and disability status. Before using and working with the released checkpoints, it is highly recommended
to read:
[On the Dangers of Stochastic Parrots: Can Language Models Be Too Big?](https://faculty.washington.edu/ebender/papers/Stochastic_Parrots.pdf)
from Emily M. Bender, Timnit Gebru, Angelina McMillan-Major and Shmargaret Shmitchell.
The aim of the released checkpoints is to boost research on large pre-trained language models for German, especially
for identifying biases and how to prevent them, as most research is currently done only for English.
---
Please use the new GitHub Discussions feature in order to discuss or present further research questions.
Feel free to use `#gc4lm` on Twitter 🐦.
|
stefan-it/electra-base-gc4-64k-700000-cased-discriminator | 2021-05-01T09:41:36.000Z | [
"pytorch",
"tf",
"electra",
"pretraining",
"de",
"dataset:german-nlp-group/german_common_crawl",
"transformers",
"license:mit"
]
| [
".gitattributes",
"README.md",
"config.json",
"model.ckpt-700000.data-00000-of-00001",
"model.ckpt-700000.index",
"model.ckpt-700000.meta",
"pytorch_model.bin",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| stefan-it | 14 | transformers | ---
language: de
license: mit
datasets:
- german-nlp-group/german_common_crawl
---
# GC4LM: A Colossal (Biased) language model for German
This repository presents a colossal (and biased) language model for German trained on the recently released
["German colossal, clean Common Crawl corpus"](https://german-nlp-group.github.io/projects/gc4-corpus.html) (GC4),
with a total dataset size of ~844GB.
---
**Disclaimer**: the presented and trained language models in this repository are for **research only** purposes.
The GC4 corpus - that was used for training - contains crawled texts from the internet. Thus, the language models can
be considered as highly biased, resulting in a model that encodes stereotypical associations along gender, race,
ethnicity and disability status. Before using and working with the released checkpoints, it is highly recommended
to read:
[On the Dangers of Stochastic Parrots: Can Language Models Be Too Big?](https://faculty.washington.edu/ebender/papers/Stochastic_Parrots.pdf)
from Emily M. Bender, Timnit Gebru, Angelina McMillan-Major and Shmargaret Shmitchell.
The aim of the released checkpoints is to boost research on large pre-trained language models for German, especially
for identifying biases and how to prevent them, as most research is currently done only for English.
---
Please use the new GitHub Discussions feature in order to discuss or present further research questions.
Feel free to use `#gc4lm` on Twitter 🐦.
|
|
stefan-it/electra-base-gc4-64k-700000-cased-generator | 2021-05-01T11:21:51.000Z | [
"pytorch",
"tf",
"electra",
"masked-lm",
"de",
"dataset:german-nlp-group/german_common_crawl",
"transformers",
"license:mit",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"README.md",
"config.json",
"model.ckpt-700000.data-00000-of-00001",
"model.ckpt-700000.index",
"model.ckpt-700000.meta",
"pytorch_model.bin",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| stefan-it | 7 | transformers | ---
language: de
license: mit
datasets:
- german-nlp-group/german_common_crawl
widget:
- text: "Heute ist ein [MASK] Tag"
---
# GC4LM: A Colossal (Biased) language model for German
This repository presents a colossal (and biased) language model for German trained on the recently released
["German colossal, clean Common Crawl corpus"](https://german-nlp-group.github.io/projects/gc4-corpus.html) (GC4),
with a total dataset size of ~844GB.
---
**Disclaimer**: the presented and trained language models in this repository are for **research only** purposes.
The GC4 corpus - that was used for training - contains crawled texts from the internet. Thus, the language models can
be considered as highly biased, resulting in a model that encodes stereotypical associations along gender, race,
ethnicity and disability status. Before using and working with the released checkpoints, it is highly recommended
to read:
[On the Dangers of Stochastic Parrots: Can Language Models Be Too Big?](https://faculty.washington.edu/ebender/papers/Stochastic_Parrots.pdf)
from Emily M. Bender, Timnit Gebru, Angelina McMillan-Major and Shmargaret Shmitchell.
The aim of the released checkpoints is to boost research on large pre-trained language models for German, especially
for identifying biases and how to prevent them, as most research is currently done only for English.
---
Please use the new GitHub Discussions feature in order to discuss or present further research questions.
Feel free to use `#gc4lm` on Twitter 🐦.
|
stefan-it/electra-base-gc4-64k-800000-cased-discriminator | 2021-05-01T09:46:59.000Z | [
"pytorch",
"tf",
"electra",
"pretraining",
"de",
"dataset:german-nlp-group/german_common_crawl",
"transformers",
"license:mit"
]
| [
".gitattributes",
"README.md",
"config.json",
"model.ckpt-800000.data-00000-of-00001",
"model.ckpt-800000.index",
"model.ckpt-800000.meta",
"pytorch_model.bin",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| stefan-it | 12 | transformers | ---
language: de
license: mit
datasets:
- german-nlp-group/german_common_crawl
---
# GC4LM: A Colossal (Biased) language model for German
This repository presents a colossal (and biased) language model for German trained on the recently released
["German colossal, clean Common Crawl corpus"](https://german-nlp-group.github.io/projects/gc4-corpus.html) (GC4),
with a total dataset size of ~844GB.
---
**Disclaimer**: the presented and trained language models in this repository are for **research only** purposes.
The GC4 corpus - that was used for training - contains crawled texts from the internet. Thus, the language models can
be considered as highly biased, resulting in a model that encodes stereotypical associations along gender, race,
ethnicity and disability status. Before using and working with the released checkpoints, it is highly recommended
to read:
[On the Dangers of Stochastic Parrots: Can Language Models Be Too Big?](https://faculty.washington.edu/ebender/papers/Stochastic_Parrots.pdf)
from Emily M. Bender, Timnit Gebru, Angelina McMillan-Major and Shmargaret Shmitchell.
The aim of the released checkpoints is to boost research on large pre-trained language models for German, especially
for identifying biases and how to prevent them, as most research is currently done only for English.
---
Please use the new GitHub Discussions feature in order to discuss or present further research questions.
Feel free to use `#gc4lm` on Twitter 🐦.
|
|
stefan-it/electra-base-gc4-64k-800000-cased-generator | 2021-05-01T11:23:30.000Z | [
"pytorch",
"tf",
"electra",
"masked-lm",
"de",
"dataset:german-nlp-group/german_common_crawl",
"transformers",
"license:mit",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"README.md",
"config.json",
"model.ckpt-800000.data-00000-of-00001",
"model.ckpt-800000.index",
"model.ckpt-800000.meta",
"pytorch_model.bin",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| stefan-it | 12 | transformers | ---
language: de
license: mit
datasets:
- german-nlp-group/german_common_crawl
widget:
- text: "Heute ist ein [MASK] Tag"
---
# GC4LM: A Colossal (Biased) language model for German
This repository presents a colossal (and biased) language model for German trained on the recently released
["German colossal, clean Common Crawl corpus"](https://german-nlp-group.github.io/projects/gc4-corpus.html) (GC4),
with a total dataset size of ~844GB.
---
**Disclaimer**: the presented and trained language models in this repository are for **research only** purposes.
The GC4 corpus - that was used for training - contains crawled texts from the internet. Thus, the language models can
be considered as highly biased, resulting in a model that encodes stereotypical associations along gender, race,
ethnicity and disability status. Before using and working with the released checkpoints, it is highly recommended
to read:
[On the Dangers of Stochastic Parrots: Can Language Models Be Too Big?](https://faculty.washington.edu/ebender/papers/Stochastic_Parrots.pdf)
from Emily M. Bender, Timnit Gebru, Angelina McMillan-Major and Shmargaret Shmitchell.
The aim of the released checkpoints is to boost research on large pre-trained language models for German, especially
for identifying biases and how to prevent them, as most research is currently done only for English.
---
Please use the new GitHub Discussions feature in order to discuss or present further research questions.
Feel free to use `#gc4lm` on Twitter 🐦.
|
stefan-it/electra-base-gc4-64k-900000-cased-discriminator | 2021-05-01T11:11:31.000Z | [
"pytorch",
"tf",
"electra",
"pretraining",
"de",
"dataset:german-nlp-group/german_common_crawl",
"transformers",
"license:mit"
]
| [
".gitattributes",
"README.md",
"config.json",
"model.ckpt-900000.data-00000-of-00001",
"model.ckpt-900000.index",
"model.ckpt-900000.meta",
"pytorch_model.bin",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| stefan-it | 10 | transformers | ---
language: de
license: mit
datasets:
- german-nlp-group/german_common_crawl
---
# GC4LM: A Colossal (Biased) language model for German
This repository presents a colossal (and biased) language model for German trained on the recently released
["German colossal, clean Common Crawl corpus"](https://german-nlp-group.github.io/projects/gc4-corpus.html) (GC4),
with a total dataset size of ~844GB.
---
**Disclaimer**: the presented and trained language models in this repository are for **research only** purposes.
The GC4 corpus - that was used for training - contains crawled texts from the internet. Thus, the language models can
be considered as highly biased, resulting in a model that encodes stereotypical associations along gender, race,
ethnicity and disability status. Before using and working with the released checkpoints, it is highly recommended
to read:
[On the Dangers of Stochastic Parrots: Can Language Models Be Too Big?](https://faculty.washington.edu/ebender/papers/Stochastic_Parrots.pdf)
from Emily M. Bender, Timnit Gebru, Angelina McMillan-Major and Shmargaret Shmitchell.
The aim of the released checkpoints is to boost research on large pre-trained language models for German, especially
for identifying biases and how to prevent them, as most research is currently done only for English.
---
Please use the new GitHub Discussions feature in order to discuss or present further research questions.
Feel free to use `#gc4lm` on Twitter 🐦.
|
|
stefan-it/electra-base-gc4-64k-900000-cased-generator | 2021-05-01T11:24:01.000Z | [
"pytorch",
"tf",
"electra",
"masked-lm",
"de",
"dataset:german-nlp-group/german_common_crawl",
"transformers",
"license:mit",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"README.md",
"config.json",
"model.ckpt-900000.data-00000-of-00001",
"model.ckpt-900000.index",
"model.ckpt-900000.meta",
"pytorch_model.bin",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| stefan-it | 12 | transformers | ---
language: de
license: mit
datasets:
- german-nlp-group/german_common_crawl
widget:
- text: "Heute ist ein [MASK] Tag"
---
# GC4LM: A Colossal (Biased) language model for German
This repository presents a colossal (and biased) language model for German trained on the recently released
["German colossal, clean Common Crawl corpus"](https://german-nlp-group.github.io/projects/gc4-corpus.html) (GC4),
with a total dataset size of ~844GB.
---
**Disclaimer**: the presented and trained language models in this repository are for **research only** purposes.
The GC4 corpus - that was used for training - contains crawled texts from the internet. Thus, the language models can
be considered as highly biased, resulting in a model that encodes stereotypical associations along gender, race,
ethnicity and disability status. Before using and working with the released checkpoints, it is highly recommended
to read:
[On the Dangers of Stochastic Parrots: Can Language Models Be Too Big?](https://faculty.washington.edu/ebender/papers/Stochastic_Parrots.pdf)
from Emily M. Bender, Timnit Gebru, Angelina McMillan-Major and Shmargaret Shmitchell.
The aim of the released checkpoints is to boost research on large pre-trained language models for German, especially
for identifying biases and how to prevent them, as most research is currently done only for English.
---
Please use the new GitHub Discussions feature in order to discuss or present further research questions.
Feel free to use `#gc4lm` on Twitter 🐦.
|
stefan-it/flair-ner-conll03 | 2020-12-11T10:07:20.000Z | [
"pytorch",
"en",
"flair",
"sequence-tagger-model",
"license:mit"
]
| [
".gitattributes",
"README.md",
"pytorch_model.bin"
]
| stefan-it | 0 | flair | ---
language: en
tags:
- flair
- sequence-tagger-model
license: mit
---
# CoNLL-2003 NER Model
Imported sequence tagger model for Flair that was trained on the English CoNLL-2003 corpus for NER. A usage sketch follows below.
|
|
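A minimal usage sketch, assuming a recent Flair version that can load taggers directly from the Hub by model id:
```python
# Sketch, assuming SequenceTagger.load() accepts a Hugging Face Hub model id
# (true for recent Flair releases).
from flair.data import Sentence
from flair.models import SequenceTagger

tagger = SequenceTagger.load("stefan-it/flair-ner-conll03")
sentence = Sentence("George Washington went to Washington.")
tagger.predict(sentence)
print(sentence.to_tagged_string())
```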
stefan-it/wav2vec2-large-xlsr-53-basque | 2021-03-29T15:54:40.000Z | [
"pytorch",
"wav2vec2",
"eu",
"dataset:common_voice",
"transformers",
"audio",
"automatic-speech-recognition",
"speech",
"xlsr-fine-tuning-week",
"license:apache-2.0"
]
| automatic-speech-recognition | [
".gitattributes",
"README.md",
"config.json",
"preprocessor_config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"trainer_state.json",
"training_args.bin",
"vocab.json"
]
| stefan-it | 9 | transformers | ---
language: eu
datasets:
- common_voice
tags:
- audio
- automatic-speech-recognition
- speech
- xlsr-fine-tuning-week
license: apache-2.0
model-index:
- name: XLSR Wav2Vec2 Basque Stefan Schweter
results:
- task:
name: Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice eu
type: common_voice
args: eu
metrics:
- name: Test WER
type: wer
value: 18.272625
---
# Wav2Vec2-Large-XLSR-53-Basque
Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Basque using the [Common Voice](https://huggingface.co/datasets/common_voice) dataset.
When using this model, make sure that your speech input is sampled at 16kHz.
## Usage
The model can be used directly (without a language model) as follows:
```python
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
test_dataset = load_dataset("common_voice", "eu", split="test[:2%]")
processor = Wav2Vec2Processor.from_pretrained("stefan-it/wav2vec2-large-xlsr-53-basque")
model = Wav2Vec2ForCTC.from_pretrained("stefan-it/wav2vec2-large-xlsr-53-basque")
resampler = torchaudio.transforms.Resample(48_000, 16_000)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
predicted_ids = torch.argmax(logits, dim=-1)
print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
```
## Evaluation
The model can be evaluated as follows on the Basque test data of Common Voice.
```python
import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re
test_dataset = load_dataset("common_voice", "eu", split="test")
wer = load_metric("wer")
processor = Wav2Vec2Processor.from_pretrained("stefan-it/wav2vec2-large-xlsr-53-basque")
model = Wav2Vec2ForCTC.from_pretrained("stefan-it/wav2vec2-large-xlsr-53-basque")
model.to("cuda")
chars_to_ignore_regex = '[\\\\,\\\\?\\\\.\\\\!\\\\-\\\\;\\\\:\\\\"\\\\“\\\\%\\\\‘\\\\”\\\\�]'
resampler = torchaudio.transforms.Resample(48_000, 16_000)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
speech_array, sampling_rate = torchaudio.load(batch["path"])
batch["speech"] = resampler(speech_array).squeeze().numpy()
return batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
# Run inference over the test set in batches and collect the predicted strings
def evaluate(batch):
inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
pred_ids = torch.argmax(logits, dim=-1)
batch["pred_strings"] = processor.batch_decode(pred_ids)
return batch
result = test_dataset.map(evaluate, batched=True, batch_size=8)
print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```
**Test Result**: 18.272625%
## Training
The Common Voice `train` and `validation` datasets were used for training.
The script used for training will be published here soon.
## Acknowledgements
Many thanks to the [OVH team](https://www.ovhcloud.com) for providing access to a V-100 instance. Without their help,
fine-tuning would not have been possible!
I would also like to thank [Manuel Romero](https://github.com/mrm8488) (mrm8488) for helping with the fine-tuning script! |
sterchelen/lb-test | 2021-05-25T15:06:21.000Z | []
| [
".gitattributes",
"test"
]
| sterchelen | 0 | |||
sterchelen/test | 2021-06-03T09:26:05.000Z | []
| [
".gitattributes",
"README.md",
"test.bin"
]
| sterchelen | 0 | # Test Readme
Sharing is learning... <3
|
||
stevenshoemaker/horror | 2021-05-23T12:56:03.000Z | [
"pytorch",
"jax",
"gpt2",
"lm-head",
"causal-lm",
"transformers",
"text-generation"
]
| text-generation | [
".gitattributes",
"added_tokens.json",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.json"
]
| stevenshoemaker | 14 | transformers | |
stevenshoemaker/horrormovies | 2021-01-10T00:56:20.000Z | [
"tensorboard"
]
| [
".gitattributes",
"run1/.gitattributes",
"run1/checkpoint",
"run1/counter",
"run1/encoder.json",
"run1/events.out.tfevents.1610150913.7331ac776a28",
"run1/hparams.json",
"run1/model-1000.data-00000-of-00001",
"run1/model-1000.index",
"run1/model-1000.meta",
"run1/vocab.bpe"
]
| stevenshoemaker | 0 | |||
stevenshoemaker/horrors | 2021-05-23T12:57:05.000Z | [
"pytorch",
"jax",
"gpt2",
"lm-head",
"causal-lm",
"transformers",
"text-generation"
]
| text-generation | [
".gitattributes",
"added_tokens.json",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.json"
]
| stevenshoemaker | 6 | transformers | |
stevenshoemaker/pitchfork | 2021-05-23T12:58:02.000Z | [
"pytorch",
"jax",
"gpt2",
"lm-head",
"causal-lm",
"transformers",
"text-generation"
]
| text-generation | [
".gitattributes",
"added_tokens.json",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.json"
]
| stevenshoemaker | 6 | transformers | |
stevhliu/astroGPT | 2021-05-23T12:59:14.000Z | [
"pytorch",
"tf",
"jax",
"gpt2",
"lm-head",
"causal-lm",
"en",
"transformers",
"text-generation"
]
| text-generation | [
".DS_Store",
".gitattributes",
"README.md",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tf_model.h5",
"tokenizer_config.json",
"vocab.json"
]
| stevhliu | 57 | transformers | ---
language: "en"
thumbnail: "https://raw.githubusercontent.com/stevhliu/satsuma/master/images/astroGPT-thumbnail.png"
widget:
- text: "Jan 18, 2020"
- text: "Feb 14, 2020"
- text: "Jul 04, 2020"
---
# astroGPT 🪐
## Model description
This is a GPT-2 model fine-tuned on Western zodiac signs. For more information about GPT-2, take a look at 🤗 Hugging Face's GPT-2 [model card](https://huggingface.co/gpt2). You can use astroGPT to generate a daily horoscope by entering the current date.
## How to use
To use this model, simply enter the current date like so `Mon DD, YEAR`:
```python
from transformers import AutoTokenizer, AutoModelWithLMHead
tokenizer = AutoTokenizer.from_pretrained("stevhliu/astroGPT")
model = AutoModelWithLMHead.from_pretrained("stevhliu/astroGPT")
input_ids = tokenizer.encode('Sep 03, 2020', return_tensors='pt')
sample_output = model.generate(input_ids,
                               do_sample=True,
                               max_length=75,
                               top_k=20,
                               top_p=0.97)
print(tokenizer.decode(sample_output[0], skip_special_tokens=True))
```
## Limitations and bias
astroGPT inherits the same biases that affect GPT-2, a result of being trained on large amounts of non-neutral content from the internet. The model does not currently support zodiac sign-specific generation and only returns a general horoscope. While the generated text may occasionally mention a specific zodiac sign, this is due to how the horoscopes were originally written by its human authors.
## Data
The data was scraped from [Horoscope.com](https://www.horoscope.com/us/index.aspx) and amounts to 4.7MB of training text. The text was collected from four categories (daily, love, wellness, career) and spans 09/01/19 to 08/01/2020. The archives only store horoscopes dating back a year from the current date.
## Training and results
The text was tokenized using the fast GPT-2 BPE [tokenizer](https://huggingface.co/transformers/model_doc/gpt2.html#gpt2tokenizerfast). It has a vocabulary size of 50,257 and a sequence length of 1024 tokens. The model was trained on one of Google Colaboratory's GPUs for approximately 2.5 hrs with [fastai's](https://docs.fast.ai/) learning rate finder, discriminative learning rates and 1cycle policy. See the table below for a quick summary of the training procedure and results.
| dataset size | epochs | lr | training time | train_loss | valid_loss | perplexity |
|:-------------:|:------:|:-----------------:|:-------------:|:----------:|:----------:|:----------:|
| 5.9MB | 32 | slice(1e-7,1e-5) | 2.5 hrs | 2.657170 | 2.642387 | 14.046692 |
|
stfuowned/nek | 2021-06-08T18:38:27.000Z | [
"pytorch",
"gpt2",
"lm-head",
"causal-lm",
"transformers",
"conversational",
"text-generation"
]
| conversational | [
".gitattributes",
"README.md",
"config.json",
"merges.txt",
"pytorch_model.bin",
"readme.md",
"special_tokens_map.json",
"tokenizer.json",
"tokenizer_config.json",
"vocab.json"
]
| stfuowned | 78 | transformers | ---
tags:
- conversational
---
# My Awesome Model |
stfuowned/rick-small | 2021-06-08T06:09:39.000Z | [
"pytorch",
"gpt2",
"lm-head",
"causal-lm",
"transformers",
"text-generation"
]
| text-generation | [
".gitattributes",
"config.json",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer.json",
"tokenizer_config.json",
"vocab.json"
]
| stfuowned | 1 | transformers | |
stfuowned/rick | 2021-06-08T05:50:30.000Z | [
"pytorch",
"gpt2",
"lm-head",
"causal-lm",
"transformers",
"conversational",
"text-generation"
]
| conversational | [
".gitattributes",
"README.md",
"config.json",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer.json",
"tokenizer_config.json",
"vocab.json"
]
| stfuowned | 157 | transformers | ---
tags:
- conversational
---
# My Awesome Model |
stiel/kkykykyjykyk | 2021-03-07T15:00:33.000Z | []
| [
".gitattributes",
"README.md"
]
| stiel | 0 | |||
stiel/ksjfhsjdkgfjsdh | 2021-03-07T14:48:52.000Z | []
| [
".gitattributes",
"README.md"
]
| stiel | 0 | |||
stiel/lierrrrr | 2021-03-07T15:04:23.000Z | []
| [
".gitattributes",
"README.md"
]
| stiel | 0 | |||
stlalpha/tootius | 2021-05-17T13:41:43.000Z | []
| [
".gitattributes"
]
| stlalpha | 0 | |||
stocksenti/stocksentiBertFinancial | 2021-03-21T14:41:49.000Z | []
| [
".gitattributes"
]
| stocksenti | 0 | |||
stolenpyjak/testing_model | 2021-05-20T21:56:30.000Z | [
"pytorch",
"jax",
"roberta",
"masked-lm",
"transformers",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer.json",
"tokenizer_config.json",
"vocab.json"
]
| stolenpyjak | 20 | transformers | |
stuart/lucy | 2021-04-10T03:15:48.000Z | []
| [
".gitattributes"
]
| stuart | 0 | |||
studio-ousia/luke-base | 2021-04-25T06:35:46.000Z | [
"pytorch",
"luke",
"en",
"arxiv:1906.08237",
"arxiv:1903.07785",
"arxiv:2002.01808",
"transformers",
"named entity recognition",
"entity typing",
"relation classification",
"question answering",
"license:apache-2.0"
]
| [
".gitattributes",
"README.md",
"added_tokens.json",
"config.json",
"entity_vocab.json",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.json"
]
| studio-ousia | 2,126 | transformers | ---
language: en
thumbnail: https://github.com/studio-ousia/luke/raw/master/resources/luke_logo.png
tags:
- luke
- named entity recognition
- entity typing
- relation classification
- question answering
license: apache-2.0
---
## LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention
**LUKE** (**L**anguage **U**nderstanding with **K**nowledge-based
**E**mbeddings) is a new pre-trained contextualized representation of words and
entities based on the transformer architecture. LUKE treats words and entities in a given text as
independent tokens, and outputs contextualized representations of them. LUKE
adopts an entity-aware self-attention mechanism that is an extension of the
self-attention mechanism of the transformer, and considers the types of tokens
(words or entities) when computing attention scores.
LUKE achieves state-of-the-art results on five popular NLP benchmarks including
**[SQuAD v1.1](https://rajpurkar.github.io/SQuAD-explorer/)** (extractive
question answering),
**[CoNLL-2003](https://www.clips.uantwerpen.be/conll2003/ner/)** (named entity
recognition), **[ReCoRD](https://sheng-z.github.io/ReCoRD-explorer/)**
(cloze-style question answering),
**[TACRED](https://nlp.stanford.edu/projects/tacred/)** (relation
classification), and
**[Open Entity](https://www.cs.utexas.edu/~eunsol/html_pages/open_entity.html)**
(entity typing).
Please check the [official repository](https://github.com/studio-ousia/luke) for
more details and updates.
This is the LUKE base model, with 12 hidden layers and a hidden size of 768. The total
number of parameters in this model is 253M. It was trained using the December 2018
version of Wikipedia.
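As a quick orientation, here is a minimal sketch of computing contextualized word and entity representations with the LUKE classes in `transformers` (the example sentence and span are illustrative):
```python
from transformers import LukeTokenizer, LukeModel

tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base")
model = LukeModel.from_pretrained("studio-ousia/luke-base")

text = "Beyoncé lives in Los Angeles."
entity_spans = [(0, 7)]  # character-based span of "Beyoncé"

inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
outputs = model(**inputs)

word_states = outputs.last_hidden_state           # contextualized word representations
entity_states = outputs.entity_last_hidden_state  # contextualized entity representations
```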
### Experimental results
The experimental results are provided as follows:
| Task | Dataset | Metric | LUKE-large | luke-base | Previous SOTA |
| ------------------------------ | ---------------------------------------------------------------------------- | ------ | ----------------- | --------- | ------------------------------------------------------------------------- |
| Extractive Question Answering | [SQuAD v1.1](https://rajpurkar.github.io/SQuAD-explorer/) | EM/F1 | **90.2**/**95.4** | 86.1/92.3 | 89.9/95.1 ([Yang et al., 2019](https://arxiv.org/abs/1906.08237)) |
| Named Entity Recognition | [CoNLL-2003](https://www.clips.uantwerpen.be/conll2003/ner/) | F1 | **94.3** | 93.3 | 93.5 ([Baevski et al., 2019](https://arxiv.org/abs/1903.07785)) |
| Cloze-style Question Answering | [ReCoRD](https://sheng-z.github.io/ReCoRD-explorer/) | EM/F1 | **90.6**/**91.2** | - | 83.1/83.7 ([Li et al., 2019](https://www.aclweb.org/anthology/D19-6011/)) |
| Relation Classification | [TACRED](https://nlp.stanford.edu/projects/tacred/) | F1 | **72.7** | - | 72.0 ([Wang et al. , 2020](https://arxiv.org/abs/2002.01808)) |
| Fine-grained Entity Typing | [Open Entity](https://www.cs.utexas.edu/~eunsol/html_pages/open_entity.html) | F1 | **78.2** | - | 77.6 ([Wang et al. , 2020](https://arxiv.org/abs/2002.01808)) |
### Citation
If you find LUKE useful for your work, please cite the following paper:
```latex
@inproceedings{yamada2020luke,
title={LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention},
author={Ikuya Yamada and Akari Asai and Hiroyuki Shindo and Hideaki Takeda and Yuji Matsumoto},
booktitle={EMNLP},
year={2020}
}
```
|
|
studio-ousia/luke-large-finetuned-conll-2003 | 2021-04-26T16:09:42.000Z | [
"pytorch",
"luke",
"transformers"
]
| [
".gitattributes",
"added_tokens.json",
"config.json",
"entity_vocab.json",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.json"
]
| studio-ousia | 1,637 | transformers | ||
studio-ousia/luke-large-finetuned-open-entity | 2021-04-26T16:10:58.000Z | [
"pytorch",
"luke",
"transformers"
]
| [
".gitattributes",
"added_tokens.json",
"config.json",
"entity_vocab.json",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.json"
]
| studio-ousia | 235 | transformers | ||
studio-ousia/luke-large-finetuned-tacred | 2021-04-26T16:10:26.000Z | [
"pytorch",
"luke",
"transformers"
]
| [
".gitattributes",
"added_tokens.json",
"config.json",
"entity_vocab.json",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.json"
]
| studio-ousia | 623 | transformers | ||
studio-ousia/luke-large | 2021-04-25T06:33:41.000Z | [
"pytorch",
"luke",
"en",
"arxiv:1906.08237",
"arxiv:1903.07785",
"arxiv:2002.01808",
"transformers",
"named entity recognition",
"entity typing",
"relation classification",
"question answering",
"license:apache-2.0"
]
| [
".gitattributes",
"README.md",
"added_tokens.json",
"config.json",
"entity_vocab.json",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.json"
]
| studio-ousia | 701 | transformers | ---
language: en
thumbnail: https://github.com/studio-ousia/luke/raw/master/resources/luke_logo.png
tags:
- luke
- named entity recognition
- entity typing
- relation classification
- question answering
license: apache-2.0
---
## LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention
**LUKE** (**L**anguage **U**nderstanding with **K**nowledge-based
**E**mbeddings) is a new pre-trained contextualized representation of words and
entities based on the transformer architecture. LUKE treats words and entities in a given text as
independent tokens, and outputs contextualized representations of them. LUKE
adopts an entity-aware self-attention mechanism that is an extension of the
self-attention mechanism of the transformer, and considers the types of tokens
(words or entities) when computing attention scores.
LUKE achieves state-of-the-art results on five popular NLP benchmarks including
**[SQuAD v1.1](https://rajpurkar.github.io/SQuAD-explorer/)** (extractive
question answering),
**[CoNLL-2003](https://www.clips.uantwerpen.be/conll2003/ner/)** (named entity
recognition), **[ReCoRD](https://sheng-z.github.io/ReCoRD-explorer/)**
(cloze-style question answering),
**[TACRED](https://nlp.stanford.edu/projects/tacred/)** (relation
classification), and
**[Open Entity](https://www.cs.utexas.edu/~eunsol/html_pages/open_entity.html)**
(entity typing).
Please check the [official repository](https://github.com/studio-ousia/luke) for
more details and updates.
This is the LUKE large model, with 24 hidden layers and a hidden size of 1024. The total
number of parameters in this model is 483M. It was trained using the December 2018
version of Wikipedia.
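The large model exposes the same interface as the base model; a minimal sketch (example sentence and span are illustrative):
```python
from transformers import LukeTokenizer, LukeModel

tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large")
model = LukeModel.from_pretrained("studio-ousia/luke-large")

text = "Ludwig van Beethoven was born in Bonn."
entity_spans = [(0, 20)]  # character-based span of "Ludwig van Beethoven"

inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
outputs = model(**inputs)
print(outputs.entity_last_hidden_state.shape)  # (1, 1, 1024) for the large model
```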
### Experimental results
The experimental results are provided as follows:
| Task | Dataset | Metric | LUKE-large | luke-base | Previous SOTA |
| ------------------------------ | ---------------------------------------------------------------------------- | ------ | ----------------- | --------- | ------------------------------------------------------------------------- |
| Extractive Question Answering | [SQuAD v1.1](https://rajpurkar.github.io/SQuAD-explorer/) | EM/F1 | **90.2**/**95.4** | 86.1/92.3 | 89.9/95.1 ([Yang et al., 2019](https://arxiv.org/abs/1906.08237)) |
| Named Entity Recognition | [CoNLL-2003](https://www.clips.uantwerpen.be/conll2003/ner/) | F1 | **94.3** | 93.3 | 93.5 ([Baevski et al., 2019](https://arxiv.org/abs/1903.07785)) |
| Cloze-style Question Answering | [ReCoRD](https://sheng-z.github.io/ReCoRD-explorer/) | EM/F1 | **90.6**/**91.2** | - | 83.1/83.7 ([Li et al., 2019](https://www.aclweb.org/anthology/D19-6011/)) |
| Relation Classification | [TACRED](https://nlp.stanford.edu/projects/tacred/) | F1 | **72.7** | - | 72.0 ([Wang et al. , 2020](https://arxiv.org/abs/2002.01808)) |
| Fine-grained Entity Typing | [Open Entity](https://www.cs.utexas.edu/~eunsol/html_pages/open_entity.html) | F1 | **78.2** | - | 77.6 ([Wang et al. , 2020](https://arxiv.org/abs/2002.01808)) |
### Citation
If you find LUKE useful for your work, please cite the following paper:
```latex
@inproceedings{yamada2020luke,
title={LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention},
author={Ikuya Yamada and Akari Asai and Hiroyuki Shindo and Hideaki Takeda and Yuji Matsumoto},
booktitle={EMNLP},
year={2020}
}
```
|
|
studios/TES | 2021-04-06T08:17:28.000Z | []
| [
".gitattributes",
"README.md"
]
| studios | 0 | tes |
||
studios/TES2 | 2021-04-06T08:19:54.000Z | []
| [
".gitattributes",
"README.md"
]
| studios | 0 | tesss |
||
subbareddyiiit/BERT-NLP | 2021-05-20T07:15:46.000Z | [
"pytorch",
"jax",
"bert",
"masked-lm",
"transformers",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"README.md",
"config.json",
"flax_model.msgpack",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.txt"
]
| subbareddyiiit | 136 | transformers | hello
|
subbareddyiiit/GPT2NLP | 2021-05-23T13:00:32.000Z | [
"pytorch",
"jax",
"gpt2",
"lm-head",
"causal-lm",
"transformers",
"text-generation"
]
| text-generation | [
".gitattributes",
"README.md",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.json"
]
| subbareddyiiit | 14 | transformers | hello
|
subbareddyiiit/RobertaNLP | 2021-05-20T21:57:23.000Z | [
"pytorch",
"jax",
"roberta",
"masked-lm",
"transformers",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"README.md",
"config.json",
"flax_model.msgpack",
"merges.txt",
"optimizer.pt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.json"
]
| subbareddyiiit | 7 | transformers | hello
|
subbareddyiiit/TeAlbert | 2020-06-19T22:39:46.000Z | [
"pytorch",
"transformers"
]
| [
".gitattributes",
"config.json",
"graph.pbtxt",
"model.ckpt-100000.data-00000-of-00001",
"model.ckpt-100000.index",
"model.ckpt-100000.meta",
"pytorch_model.bin",
"spiece.model",
"vocab.txt"
]
| subbareddyiiit | 10 | transformers | ||
subbareddyiiit/TeElectra | 2020-06-21T06:59:39.000Z | [
"pytorch",
"electra",
"masked-lm",
"transformers",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"config.json",
"model.ckpt-70000.data-00000-of-00001",
"model.ckpt-70000.index",
"model.ckpt-70000.meta",
"pytorch_model.bin",
"vocab.txt"
]
| subbareddyiiit | 28 | transformers | |
subbareddyiiit/TeRobeRta | 2021-05-20T21:58:55.000Z | [
"pytorch",
"jax",
"roberta",
"masked-lm",
"transformers",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"merges.txt",
"optimizer.pt",
"pytorch_model.bin",
"scheduler.pt",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.json"
]
| subbareddyiiit | 67 | transformers | |
subbareddyiiit/bert_csl_gold8k | 2021-05-20T07:17:19.000Z | [
"pytorch",
"jax",
"bert",
"masked-lm",
"transformers",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"README.md",
"config.json",
"flax_model.msgpack",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.txt"
]
| subbareddyiiit | 8 | transformers | hello
|
subbareddyiiit/gpt2_csl_gold8k | 2021-05-23T13:01:39.000Z | [
"pytorch",
"jax",
"gpt2",
"lm-head",
"causal-lm",
"transformers",
"text-generation"
]
| text-generation | [
".gitattributes",
"README.md",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.json"
]
| subbareddyiiit | 6 | transformers | hello
|
subbareddyiiit/iiit | 2020-02-20T11:33:50.000Z | [
"tensorboard",
"transformers"
]
| [
".gitattributes",
"checkpoint",
"config.json",
"eval.tf_record",
"eval_results.txt",
"events.out.tfevents.1581945707.ip-10-0-1-89",
"graph.pbtxt",
"model.ckpt-5000.data-00000-of-00001",
"model.ckpt-5000.index",
"model.ckpt-5000.meta",
"model.ckpt-5490.data-00000-of-00001",
"model.ckpt-5490.index",
"model.ckpt-5490.meta",
"train.tf_record",
"vocab.txt",
"eval/events.out.tfevents.1581944455.ip-10-0-1-89",
"eval/events.out.tfevents.1581945569.ip-10-0-1-89",
"eval/events.out.tfevents.1581947490.ip-10-0-1-89"
]
| subbareddyiiit | 9 | transformers | ||
subbareddyiiit/inria_roberta | 2021-05-20T22:00:14.000Z | [
"pytorch",
"jax",
"roberta",
"masked-lm",
"transformers",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"README.md",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.json"
]
| subbareddyiiit | 8 | transformers | hello
|
subbareddyiiit/music_cog | 2021-05-20T07:18:03.000Z | [
"tensorboard",
"bert",
"transformers"
]
| [
".gitattributes",
"checkpoint",
"config.json",
"eval_results.txt",
"events.out.tfevents.1581863366.ip-10-0-1-89",
"events.out.tfevents.1581944561.ip-10-0-1-89",
"events.out.tfevents.1581944630.ip-10-0-1-89",
"graph.pbtxt",
"model.ckpt-10000.data-00000-of-00001",
"model.ckpt-10000.index",
"model.ckpt-10000.meta",
"vocab.txt",
"eval/events.out.tfevents.1581866007.ip-10-0-1-89",
"eval/events.out.tfevents.1581945379.ip-10-0-1-89"
]
| subbareddyiiit | 17 | transformers | ||
subbareddyiiit/roberta_csl_gold8k | 2021-05-20T22:01:14.000Z | [
"pytorch",
"jax",
"roberta",
"masked-lm",
"transformers",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"README.md",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.json"
]
| subbareddyiiit | 11 | transformers | hello
|
subbareddyiiit/tftelugu | 2020-02-20T11:36:36.000Z | [
"tensorboard",
"transformers"
]
| [
".gitattributes",
"checkpoint",
"config.json",
"eval_results.txt",
"events.out.tfevents.1581966106.ip-10-0-1-89",
"events.out.tfevents.1581968627.ip-10-0-1-89",
"graph.pbtxt",
"model.ckpt-20000.data-00000-of-00001",
"model.ckpt-20000.index",
"model.ckpt-20000.meta",
"vocab.txt",
"eval/events.out.tfevents.1581967489.ip-10-0-1-89",
"eval/events.out.tfevents.1581968510.ip-10-0-1-89",
"eval/events.out.tfevents.1581970029.ip-10-0-1-89"
]
| subbareddyiiit | 13 | transformers | ||
subham92/translation_model_by_subham | 2021-01-18T10:29:50.000Z | [
"pytorch",
"marian",
"seq2seq",
"fi",
"en",
"transformers",
"translation",
"license:apache-2.0",
"text2text-generation"
]
| translation | [
".gitattributes",
"README.md",
"config.json",
"metadata.json",
"pytorch_model.bin",
"source.spm",
"target.spm",
"tokenizer_config.json",
"vocab.json"
]
| subham92 | 26 | transformers | ---
language:
- fi
- en
tags:
- translation
license: apache-2.0
---
|
subiksha/OwnPersona | 2021-05-18T18:00:23.000Z | []
| [
".gitattributes"
]
| subiksha | 0 | |||
sublee/test | 2021-03-10T13:50:22.000Z | []
| [
".gitattributes"
]
| sublee | 0 | |||
sudhashri/Sri-AutoNLP01 | 2021-03-11T18:08:32.000Z | []
| [
".gitattributes",
"README.md"
]
| sudhashri | 0 | |||
sukritin/hindi-bert | 2021-05-20T07:19:00.000Z | [
"pytorch",
"jax",
"bert",
"masked-lm",
"transformers",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.txt"
]
| sukritin | 24 | transformers | |
sultan/BioM-ALBERT-xxlarge-PMC | 2021-05-24T21:10:15.000Z | [
"pytorch",
"albert",
"masked-lm",
"transformers",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"PubMD-30k-clean.vocab",
"README.md",
"config.json",
"pytorch_model.bin",
"spiece.model"
]
| sultan | 20 | transformers | # BioM-Transformers: Building Large Biomedical Language Models with BERT, ALBERT and ELECTRA
# Abstract
The impact of design choices on the performance
of biomedical language models recently
has been a subject for investigation. In
this paper, we empirically study biomedical
domain adaptation with large transformer models
using different design choices. We evaluate
the performance of our pretrained models
against other existing biomedical language
models in the literature. Our results show that
we achieve state-of-the-art results on several
biomedical domain tasks despite using similar
or less computational cost compared to other
models in the literature. Our findings highlight
the significant effect of design choices on
improving the performance of biomedical language
models.
# Model Description
This model was pre-trained on PMC full-text articles for a further 64K steps with a batch size of 8192, initializing its weights from our BioM-ALBERT-xxlarge model.
Check our GitHub repo at https://github.com/salrowili/BioM-Transformers for TensorFlow and GluonNLP checkpoints.
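For a quick sanity check of the masked-language-modeling head, here is a minimal sketch using the `fill-mask` pipeline; the example sentence is illustrative only:
```python
from transformers import pipeline

# load the model through the fill-mask pipeline; [MASK] is ALBERT's mask token
unmasker = pipeline("fill-mask", model="sultan/BioM-ALBERT-xxlarge-PMC")

for prediction in unmasker("The patient was treated with [MASK] for the infection."):
    print(prediction["token_str"], prediction["score"])
```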
# Acknowledgment
We would like to acknowledge the support we have from Tensorflow Research Cloud (TFRC) team to grant us access to TPUv3 units.
# Citation
```bibtex
@inproceedings{alrowili-shanker-2021-biom,
title = "{B}io{M}-Transformers: Building Large Biomedical Language Models with {BERT}, {ALBERT} and {ELECTRA}",
author = "Alrowili, Sultan and
Shanker, Vijay",
booktitle = "Proceedings of the 20th Workshop on Biomedical Language Processing",
month = jun,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2021.bionlp-1.24",
pages = "221--227",
abstract = "The impact of design choices on the performance of biomedical language models recently has been a subject for investigation. In this paper, we empirically study biomedical domain adaptation with large transformer models using different design choices. We evaluate the performance of our pretrained models against other existing biomedical language models in the literature. Our results show that we achieve state-of-the-art results on several biomedical domain tasks despite using similar or less computational cost compared to other models in the literature. Our findings highlight the significant effect of design choices on improving the performance of biomedical language models.",
}
``` |
sultan/BioM-ALBERT-xxlarge-SQuAD2 | 2021-05-25T11:14:44.000Z | [
"pytorch",
"albert",
"question-answering",
"transformers"
]
| question-answering | [
".gitattributes",
"README.md",
"config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"spiece.model",
"tokenizer_config.json"
]
| sultan | 30 | transformers | # BioM-Transformers: Building Large Biomedical Language Models with BERT, ALBERT and ELECTRA
# Abstract
The impact of design choices on the performance
of biomedical language models recently
has been a subject for investigation. In
this paper, we empirically study biomedical
domain adaptation with large transformer models
using different design choices. We evaluate
the performance of our pretrained models
against other existing biomedical language
models in the literature. Our results show that
we achieve state-of-the-art results on several
biomedical domain tasks despite using similar
or less computational cost compared to other
models in the literature. Our findings highlight
the significant effect of design choices on
improving the performance of biomedical language
models.
# Model Description
This model is fine-tuned on the SQuAD2.0 dataset. Fine-tuning a biomedical language model on the SQuAD dataset helps improve its score on the BioASQ challenge. If you plan to work with BioASQ or biomedical QA tasks, it's better to use this model over BioM-ALBERT-xxlarge. This model (TensorFlow version) took the lead in the BioASQ9b-Factoid challenge under the name UDEL-LAB1. To see the full details of the BioASQ9B results, please check this link http://participants-area.bioasq.org/results/9b/phaseB/ (you need to register).
The Hugging Face library doesn't implement the layer-wise learning-rate decay feature, which affects performance on the SQuAD task. The result of BioM-ALBERT-xxlarge-SQuAD reported in our paper is 87.00 (F1), since we used the ALBERT open-source code with the TF checkpoint, which applies layer-wise decay.
Results obtained with PyTorch on a V100 GPU:
```
***** eval metrics *****
HasAns_exact = 77.6484
HasAns_f1 = 85.0136
HasAns_total = 5928
NoAns_exact = 86.577
NoAns_f1 = 86.577
NoAns_total = 5945
best_exact = 82.1191
best_exact_thresh = 0.0
best_f1 = 85.7964
best_f1_thresh = 0.0
eval_samples = 12551
exact = 82.1191
f1 = 85.7964
total = 11873
```
To reproduce results in Google Colab:
- Make sure you have GPU enabled.
- Clone and install required libraries through this code
!git clone https://github.com/huggingface/transformers
!pip3 install -e transformers
!pip3 install sentencepiece
!pip3 install -r /content/transformers/examples/pytorch/question-answering/requirements.txt
- Run this command:
```bash
python /content/transformers/examples/pytorch/question-answering/run_qa.py --model_name_or_path sultan/BioM-ALBERT-xxlarge-SQuAD2 \
--do_eval \
--version_2_with_negative \
--per_device_eval_batch_size 8 \
--dataset_name squad_v2 \
--overwrite_output_dir \
--fp16 \
--output_dir out
```
You don't need to download the SQuAD2 dataset. The code will download it from the HuggingFace datasets hub.
Check our GitHub repo at https://github.com/salrowili/BioM-Transformers for TensorFlow and GluonNLP checkpoints.
# Acknowledgment
We would like to acknowledge the support we have from Tensorflow Research Cloud (TFRC) team to grant us access to TPUv3 units.
# Citation
```bibtex
@inproceedings{alrowili-shanker-2021-biom,
title = "{B}io{M}-Transformers: Building Large Biomedical Language Models with {BERT}, {ALBERT} and {ELECTRA}",
author = "Alrowili, Sultan and
Shanker, Vijay",
booktitle = "Proceedings of the 20th Workshop on Biomedical Language Processing",
month = jun,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2021.bionlp-1.24",
pages = "221--227",
abstract = "The impact of design choices on the performance of biomedical language models recently has been a subject for investigation. In this paper, we empirically study biomedical domain adaptation with large transformer models using different design choices. We evaluate the performance of our pretrained models against other existing biomedical language models in the literature. Our results show that we achieve state-of-the-art results on several biomedical domain tasks despite using similar or less computational cost compared to other models in the literature. Our findings highlight the significant effect of design choices on improving the performance of biomedical language models.",
}
``` |
sultan/BioM-ALBERT-xxlarge | 2021-05-24T21:04:29.000Z | [
"pytorch",
"albert",
"masked-lm",
"transformers",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"PubMD-30k-clean.vocab",
"README.md",
"config.json",
"pytorch_model.bin",
"spiece.model"
]
| sultan | 16 | transformers | # BioM-Transformers: Building Large Biomedical Language Models with BERT, ALBERT and ELECTRA
# Abstract
The impact of design choices on the performance
of biomedical language models recently
has been a subject for investigation. In
this paper, we empirically study biomedical
domain adaptation with large transformer models
using different design choices. We evaluate
the performance of our pretrained models
against other existing biomedical language
models in the literature. Our results show that
we achieve state-of-the-art results on several
biomedical domain tasks despite using similar
or less computational cost compared to other
models in the literature. Our findings highlight
the significant effect of design choices on
improving the performance of biomedical language
models.
# Model Description
This model was pre-trained on PubMed Abstracts only with biomedical domain vocabulary for 264K steps with a batch size of 8192 on TPUv3-512 unit.
Check our GitHub repo at https://github.com/salrowili/BioM-Transformers for TensorFlow and GluonNLP checkpoints.
# Acknowledgment
We would like to acknowledge the support we have from Tensorflow Research Cloud (TFRC) team to grant us access to TPUv3 units.
# Citation
```bibtex
@inproceedings{alrowili-shanker-2021-biom,
title = "{B}io{M}-Transformers: Building Large Biomedical Language Models with {BERT}, {ALBERT} and {ELECTRA}",
author = "Alrowili, Sultan and
Shanker, Vijay",
booktitle = "Proceedings of the 20th Workshop on Biomedical Language Processing",
month = jun,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2021.bionlp-1.24",
pages = "221--227",
abstract = "The impact of design choices on the performance of biomedical language models recently has been a subject for investigation. In this paper, we empirically study biomedical domain adaptation with large transformer models using different design choices. We evaluate the performance of our pretrained models against other existing biomedical language models in the literature. Our results show that we achieve state-of-the-art results on several biomedical domain tasks despite using similar or less computational cost compared to other models in the literature. Our findings highlight the significant effect of design choices on improving the performance of biomedical language models.",
}
``` |
sultan/BioM-ELECTRA-Base-Discriminator | 2021-05-24T21:09:13.000Z | [
"pytorch",
"electra",
"pretraining",
"transformers"
]
| [
".gitattributes",
"README.md",
"config.json",
"pytorch_model.bin",
"vocab.txt"
]
| sultan | 23 | transformers | # BioM-Transformers: Building Large Biomedical Language Models with BERT, ALBERT and ELECTRA
# Abstract
The impact of design choices on the performance
of biomedical language models recently
has been a subject for investigation. In
this paper, we empirically study biomedical
domain adaptation with large transformer models
using different design choices. We evaluate
the performance of our pretrained models
against other existing biomedical language
models in the literature. Our results show that
we achieve state-of-the-art results on several
biomedical domain tasks despite using similar
or less computational cost compared to other
models in the literature. Our findings highlight
the significant effect of design choices on
improving the performance of biomedical language
models.
# Model Description
This model was pre-trained on PubMed Abstracts only with biomedical domain vocabulary for 500K steps with a batch size of 1024 on TPUv3-32 unit.
Check our GitHub repo at https://github.com/salrowili/BioM-Transformers for TensorFlow and GluonNLP checkpoints.
# Acknowledgment
We would like to acknowledge the support we have from Tensorflow Research Cloud (TFRC) team to grant us access to TPUv3 units.
# Citation
```bibtex
@inproceedings{alrowili-shanker-2021-biom,
title = "{B}io{M}-Transformers: Building Large Biomedical Language Models with {BERT}, {ALBERT} and {ELECTRA}",
author = "Alrowili, Sultan and
Shanker, Vijay",
booktitle = "Proceedings of the 20th Workshop on Biomedical Language Processing",
month = jun,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2021.bionlp-1.24",
pages = "221--227",
abstract = "The impact of design choices on the performance of biomedical language models recently has been a subject for investigation. In this paper, we empirically study biomedical domain adaptation with large transformer models using different design choices. We evaluate the performance of our pretrained models against other existing biomedical language models in the literature. Our results show that we achieve state-of-the-art results on several biomedical domain tasks despite using similar or less computational cost compared to other models in the literature. Our findings highlight the significant effect of design choices on improving the performance of biomedical language models.",
}
``` |
|
sultan/BioM-ELECTRA-Base-Generator | 2021-05-24T21:08:37.000Z | [
"pytorch",
"electra",
"masked-lm",
"transformers",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"README.md",
"config.json",
"pytorch_model.bin",
"vocab.txt"
]
| sultan | 13 | transformers | # BioM-Transformers: Building Large Biomedical Language Models with BERT, ALBERT and ELECTRA
# Abstract
The impact of design choices on the performance
of biomedical language models recently
has been a subject for investigation. In
this paper, we empirically study biomedical
domain adaptation with large transformer models
using different design choices. We evaluate
the performance of our pretrained models
against other existing biomedical language
models in the literature. Our results show that
we achieve state-of-the-art results on several
biomedical domain tasks despite using similar
or less computational cost compared to other
models in the literature. Our findings highlight
the significant effect of design choices on
improving the performance of biomedical language
models.
# Model Description
This model was pre-trained on PubMed Abstracts only with biomedical domain vocabulary for 500k steps with a batch size of 1024 on TPUv3-32 unit.
Check our GitHub repo at https://github.com/salrowili/BioM-Transformers for TensorFlow and GluonNLP checkpoints.
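Since the generator is a masked language model, it can be queried directly; a minimal sketch, with an illustrative example sentence:
```python
import torch
from transformers import ElectraTokenizerFast, ElectraForMaskedLM

tokenizer = ElectraTokenizerFast.from_pretrained("sultan/BioM-ELECTRA-Base-Generator")
model = ElectraForMaskedLM.from_pretrained("sultan/BioM-ELECTRA-Base-Generator")

inputs = tokenizer("Aspirin is used to reduce [MASK] and inflammation.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# take the highest-scoring token at the masked position
mask_pos = (inputs.input_ids == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
predicted_id = logits[0, mask_pos].argmax(dim=-1)
print(tokenizer.decode(predicted_id))
```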
# Acknowledgment
We would like to acknowledge the support we have from Tensorflow Research Cloud (TFRC) team to grant us access to TPUv3 units.
# Citation
```bibtex
@inproceedings{alrowili-shanker-2021-biom,
title = "{B}io{M}-Transformers: Building Large Biomedical Language Models with {BERT}, {ALBERT} and {ELECTRA}",
author = "Alrowili, Sultan and
Shanker, Vijay",
booktitle = "Proceedings of the 20th Workshop on Biomedical Language Processing",
month = jun,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2021.bionlp-1.24",
pages = "221--227",
abstract = "The impact of design choices on the performance of biomedical language models recently has been a subject for investigation. In this paper, we empirically study biomedical domain adaptation with large transformer models using different design choices. We evaluate the performance of our pretrained models against other existing biomedical language models in the literature. Our results show that we achieve state-of-the-art results on several biomedical domain tasks despite using similar or less computational cost compared to other models in the literature. Our findings highlight the significant effect of design choices on improving the performance of biomedical language models.",
}
``` |
sultan/BioM-ELECTRA-Large-Discriminator | 2021-05-24T21:07:17.000Z | [
"pytorch",
"electra",
"pretraining",
"transformers"
]
| [
".gitattributes",
"README.md",
"config.json",
"pytorch_model.bin",
"vocab.txt"
]
| sultan | 380 | transformers | # BioM-Transformers: Building Large Biomedical Language Models with BERT, ALBERT and ELECTRA
# Abstract
The impact of design choices on the performance
of biomedical language models recently
has been a subject for investigation. In
this paper, we empirically study biomedical
domain adaptation with large transformer models
using different design choices. We evaluate
the performance of our pretrained models
against other existing biomedical language
models in the literature. Our results show that
we achieve state-of-the-art results on several
biomedical domain tasks despite using similar
or less computational cost compared to other
models in the literature. Our findings highlight
the significant effect of design choices on
improving the performance of biomedical language
models.
# Model Description
This model was pre-trained on PubMed Abstracts only with biomedical domain vocabulary for 434K steps with a batch size of 4096 on TPUv3-512 unit.
Check our GitHub repo at https://github.com/salrowili/BioM-Transformers for TensorFlow and GluonNLP checkpoints.
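The discriminator scores each token for whether it looks replaced (the ELECTRA pre-training objective); a minimal sketch, with an illustrative example sentence:
```python
import torch
from transformers import ElectraTokenizerFast, ElectraForPreTraining

tokenizer = ElectraTokenizerFast.from_pretrained("sultan/BioM-ELECTRA-Large-Discriminator")
model = ElectraForPreTraining.from_pretrained("sultan/BioM-ELECTRA-Large-Discriminator")

inputs = tokenizer("The patient was treated with aspirin for chest pain.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # one score per token

# sigmoid > 0.5 means the discriminator flags the token as replaced
flags = (torch.sigmoid(logits) > 0.5).long().squeeze().tolist()
tokens = tokenizer.convert_ids_to_tokens(inputs.input_ids[0])
print(list(zip(tokens, flags)))
```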
# Acknowledgment
We would like to acknowledge the support we have from Tensorflow Research Cloud (TFRC) team to grant us access to TPUv3 units.
# Citation
```bibtex
@inproceedings{alrowili-shanker-2021-biom,
title = "{B}io{M}-Transformers: Building Large Biomedical Language Models with {BERT}, {ALBERT} and {ELECTRA}",
author = "Alrowili, Sultan and
Shanker, Vijay",
booktitle = "Proceedings of the 20th Workshop on Biomedical Language Processing",
month = jun,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2021.bionlp-1.24",
pages = "221--227",
abstract = "The impact of design choices on the performance of biomedical language models recently has been a subject for investigation. In this paper, we empirically study biomedical domain adaptation with large transformer models using different design choices. We evaluate the performance of our pretrained models against other existing biomedical language models in the literature. Our results show that we achieve state-of-the-art results on several biomedical domain tasks despite using similar or less computational cost compared to other models in the literature. Our findings highlight the significant effect of design choices on improving the performance of biomedical language models.",
}
``` |
|
sultan/BioM-ELECTRA-Large-Generator | 2021-05-24T21:07:58.000Z | [
"pytorch",
"electra",
"masked-lm",
"transformers",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"README.md",
"config.json",
"pytorch_model.bin",
"vocab.txt"
]
| sultan | 6 | transformers | # BioM-Transformers: Building Large Biomedical Language Models with BERT, ALBERT and ELECTRA
# Abstract
The impact of design choices on the performance
of biomedical language models recently
has been a subject for investigation. In
this paper, we empirically study biomedical
domain adaptation with large transformer models
using different design choices. We evaluate
the performance of our pretrained models
against other existing biomedical language
models in the literature. Our results show that
we achieve state-of-the-art results on several
biomedical domain tasks despite using similar
or less computational cost compared to other
models in the literature. Our findings highlight
the significant effect of design choices on
improving the performance of biomedical language
models.
# Model Description
This model was pre-trained on PubMed Abstracts only with biomedical domain vocabulary for 434K steps with a batch size of 4096 on TPUv3-512 unit.
Check our GitHub repo at https://github.com/salrowili/BioM-Transformers for TensorFlow and GluonNLP checkpoints.
# Acknowledgment
We would like to acknowledge the support we have from Tensorflow Research Cloud (TFRC) team to grant us access to TPUv3 units.
# Citation
```bibtex
@inproceedings{alrowili-shanker-2021-biom,
title = "{B}io{M}-Transformers: Building Large Biomedical Language Models with {BERT}, {ALBERT} and {ELECTRA}",
author = "Alrowili, Sultan and
Shanker, Vijay",
booktitle = "Proceedings of the 20th Workshop on Biomedical Language Processing",
month = jun,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2021.bionlp-1.24",
pages = "221--227",
abstract = "The impact of design choices on the performance of biomedical language models recently has been a subject for investigation. In this paper, we empirically study biomedical domain adaptation with large transformer models using different design choices. We evaluate the performance of our pretrained models against other existing biomedical language models in the literature. Our results show that we achieve state-of-the-art results on several biomedical domain tasks despite using similar or less computational cost compared to other models in the literature. Our findings highlight the significant effect of design choices on improving the performance of biomedical language models.",
}
``` |
sultan/BioM-ELECTRA-Large-SQuAD2 | 2021-05-25T21:37:43.000Z | [
"pytorch",
"electra",
"question-answering",
"transformers"
]
| question-answering | [
".gitattributes",
"README.md",
"config.json",
"eval_results.txt",
"pytorch_model.bin",
"tokenizer_config.json",
"training_args.bin",
"vocab.txt"
]
| sultan | 117 | transformers | # BioM-Transformers: Building Large Biomedical Language Models with BERT, ALBERT and ELECTRA
# Abstract
The impact of design choices on the performance
of biomedical language models recently
has been a subject for investigation. In
this paper, we empirically study biomedical
domain adaptation with large transformer models
using different design choices. We evaluate
the performance of our pretrained models
against other existing biomedical language
models in the literature. Our results show that
we achieve state-of-the-art results on several
biomedical domain tasks despite using similar
or less computational cost compared to other
models in the literature. Our findings highlight
the significant effect of design choices on
improving the performance of biomedical language
models.
# Model Description
This model is fine-tuned on the SQuAD2.0 dataset. Fine-tuning a biomedical language model on the SQuAD dataset helps improve its score on the BioASQ challenge. If you plan to work with BioASQ or biomedical QA tasks, it's better to use this model over BioM-ELECTRA-Large. This model (TensorFlow version) took the lead in the BioASQ9b-Factoid challenge (Batch 5) under the name UDEL-LAB2. To see the full details of the BioASQ9B results, please check this link http://participants-area.bioasq.org/results/9b/phaseB/ (you need to register).
The Hugging Face library doesn't implement the layer-wise learning-rate decay feature, which affects performance on the SQuAD task. The result of BioM-ELECTRA-SQuAD reported in our paper is 88.3 (F1), since we used the ELECTRA open-source code with the TF checkpoint, which applies layer-wise decay.
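For quick inference (as opposed to the full evaluation below), a minimal sketch using the `question-answering` pipeline; the question and context are illustrative only:
```python
from transformers import pipeline

qa = pipeline("question-answering", model="sultan/BioM-ELECTRA-Large-SQuAD2")

result = qa(
    question="What enzyme does aspirin inhibit?",
    context="Aspirin exerts its anti-inflammatory effect by irreversibly inhibiting cyclooxygenase (COX).",
)
print(result["answer"], result["score"])
```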
Evaluation results on SQuAD2.0 Dev Dataset
```
exact = 84.33420365535248
f1 = 87.49354241889522
total = 11873
HasAns_exact = 80.43184885290148
HasAns_f1 = 86.75958656200127
HasAns_total = 5928
NoAns_exact = 88.22539949537426
NoAns_f1 = 88.22539949537426
NoAns_total = 5945
best_exact = 84.33420365535248
best_exact_thresh = 0.0
best_f1 = 87.49354241889522
best_f1_thresh = 0.0
epoch = 2.0
```
To reproduce results in Google Colab:
- Make sure you have GPU enabled.
- Clone and install required libraries through this code
!git clone https://github.com/huggingface/transformers
!pip3 install -e transformers
!pip3 install sentencepiece
!pip3 install -r /content/transformers/examples/pytorch/question-answering/requirements.txt
- Run this command:
```bash
python /content/transformers/examples/pytorch/question-answering/run_qa.py --model_name_or_path sultan/BioM-ELECTRA-Large-SQuAD2 \
  --do_eval \
  --version_2_with_negative \
  --per_device_eval_batch_size 8 \
  --dataset_name squad_v2 \
  --overwrite_output_dir \
  --fp16 \
  --output_dir out
```
You don't need to download the SQuAD2 dataset. The code will download it from the HuggingFace datasets hub.
Check our GitHub repo at https://github.com/salrowili/BioM-Transformers for TensorFlow and GluonNLP checkpoints.
# Acknowledgment
We would like to acknowledge the support we have from Tensorflow Research Cloud (TFRC) team to grant us access to TPUv3 units.
# Citation
```bibtex
@inproceedings{alrowili-shanker-2021-biom,
title = "{B}io{M}-Transformers: Building Large Biomedical Language Models with {BERT}, {ALBERT} and {ELECTRA}",
author = "Alrowili, Sultan and
Shanker, Vijay",
booktitle = "Proceedings of the 20th Workshop on Biomedical Language Processing",
month = jun,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2021.bionlp-1.24",
pages = "221--227",
abstract = "The impact of design choices on the performance of biomedical language models recently has been a subject for investigation. In this paper, we empirically study biomedical domain adaptation with large transformer models using different design choices. We evaluate the performance of our pretrained models against other existing biomedical language models in the literature. Our results show that we achieve state-of-the-art results on several biomedical domain tasks despite using similar or less computational cost compared to other models in the literature. Our findings highlight the significant effect of design choices on improving the performance of biomedical language models.",
}
``` |
sumedh/wav2vec2-large-xlsr-marathi | 2021-03-29T18:40:16.000Z | [
"pytorch",
"wav2vec2",
"mr",
"dataset:openslr",
"transformers",
"audio",
"automatic-speech-recognition",
"speech",
"xlsr-fine-tuning-week",
"license:apache-2.0"
]
| automatic-speech-recognition | [
".gitattributes",
"README.md",
"config.json",
"preprocessor_config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.json"
]
| sumedh | 60 | transformers | ---
language: mr
datasets:
- openslr
metrics:
- wer
tags:
- audio
- automatic-speech-recognition
- speech
- xlsr-fine-tuning-week
license: apache-2.0
model-index:
- name: XLSR Wav2Vec2 Large 53 Marathi by Sumedh Khodke
results:
- task:
name: Speech Recognition
type: automatic-speech-recognition
dataset:
name: OpenSLR mr
type: openslr
metrics:
- name: Test WER
type: wer
value: 12.7
---
# Wav2Vec2-Large-XLSR-53-Marathi
Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Marathi using the [Open SLR64](http://openslr.org/64/) dataset. When using this model, make sure that your speech input is sampled at 16kHz. This data contains only female voices but the model works well for male voices too. Trained on Google Colab Pro on Tesla P100 16GB GPU.<br>
**WER (Word Error Rate) on the Test Set**: 12.70 %
## Usage
The model can be used directly without a language model as follows, given that your dataset has Marathi `actual_text` and `path_in_folder` columns:
```python
import torch, torchaudio
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
# Since Marathi is not present on Common Voice, the script for reading this dataset can be found in the evaluation section below
mr_test_dataset = all_data['test']
processor = Wav2Vec2Processor.from_pretrained("sumedh/wav2vec2-large-xlsr-marathi")
model = Wav2Vec2ForCTC.from_pretrained("sumedh/wav2vec2-large-xlsr-marathi")
resampler = torchaudio.transforms.Resample(48_000, 16_000)  # first arg: input sampling rate, second arg: output sampling rate
# Preprocessing the datasets. We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
speech_array, sampling_rate = torchaudio.load(batch["path_in_folder"])
batch["speech"] = resampler(speech_array).squeeze().numpy()
return batch
mr_test_dataset = mr_test_dataset.map(speech_file_to_array_fn)
inputs = processor(mr_test_dataset["speech"][:5], sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
predicted_ids = torch.argmax(logits, dim=-1)
print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", mr_test_dataset["actual_text"][:5])
```
## Evaluation
Evaluated on 10% of the Marathi data on Open SLR-64.
```python
import os, re, torch, torchaudio
from datasets import Dataset, load_metric
import pandas as pd
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
# Below is a custom script for reading the Marathi dataset, since it is not present on Common Voice
dataset_path = "./OpenSLR-64_Marathi/mr_in_female/" #TODO : include the path of the dataset extracted from http://openslr.org/64/
audio_df = pd.read_csv(os.path.join(dataset_path,'line_index.tsv'),sep='\t',header=None)
audio_df.columns = ['path_in_folder','actual_text']
audio_df['path_in_folder'] = audio_df['path_in_folder'].apply(lambda x: dataset_path + x + '.wav')
audio_df = audio_df.sample(frac=1, random_state=2020).reset_index(drop=True) #seed number is important for reproducibility of WER score
all_data = Dataset.from_pandas(audio_df)
all_data = all_data.train_test_split(test_size=0.10,seed=2020) #seed number is important for reproducibility of WER score
mr_test_dataset = all_data['test']
wer = load_metric("wer")
processor = Wav2Vec2Processor.from_pretrained("sumedh/wav2vec2-large-xlsr-marathi")
model = Wav2Vec2ForCTC.from_pretrained("sumedh/wav2vec2-large-xlsr-marathi")
model.to("cuda")
chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“]'
resampler = torchaudio.transforms.Resample(48_000, 16_000)
# Preprocessing the datasets. We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
batch["actual_text"] = re.sub(chars_to_ignore_regex, '', batch["actual_text"]).lower()
speech_array, sampling_rate = torchaudio.load(batch["path_in_folder"])
batch["speech"] = resampler(speech_array).squeeze().numpy()
return batch
mr_test_dataset = mr_test_dataset.map(speech_file_to_array_fn)
def evaluate(batch):
inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
pred_ids = torch.argmax(logits, dim=-1)
batch["pred_strings"] = processor.batch_decode(pred_ids)
return batch
result = mr_test_dataset.map(evaluate, batched=True, batch_size=8)
print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["actual_text"])))
```
## Training
Train-Test ratio was 90:10.
The Colab notebook used for training is available [here](https://colab.research.google.com/drive/1wX46fjExcgU5t3AsWhSPTipWg_aMDg2f?usp=sharing).
## Training Config and Summary
The Weights & Biases run summary is available [here](https://wandb.ai/wandb/xlsr/runs/3itdhtb8/overview?workspace=user-sumedhkhodke).
|