| pipeline_tag (stringclasses, 48 values) | library_name (stringclasses, 205 values) | text (stringlengths 0–18.3M) | metadata (stringlengths 2–1.07B) | id (stringlengths 5–122) | last_modified (null) | tags (listlengths 1–1.84k) | sha (null) | created_at (stringlengths 25) |
---|---|---|---|---|---|---|---|---|
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-MSR-persian-base-perkey-summary | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
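All of the ARMAN rows in this table carry the same one-line card and link to the same GitHub page; none include a usage snippet. A minimal hedged sketch of loading one of these PEGASUS checkpoints with the standard `transformers` seq2seq API (the Persian input and generation settings below are illustrative assumptions, not taken from the cards):
```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Any ARMAN PEGASUS id from this table should load the same way; this is
# the summarization checkpoint from the row above.
model_id = "alireza7/ARMAN-MSR-persian-base-perkey-summary"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

inputs = tokenizer("متن فارسی برای خلاصه‌سازی ...", return_tensors="pt")  # placeholder Persian input
summary_ids = model.generate(**inputs, max_length=64, num_beams=4)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```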
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-MSR-persian-base-perkey-title | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-MSR-persian-base-tebyan | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-MSR-persian-base-voa-title | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-MSR-persian-base-wiki-summary | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-MSR-persian-base | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SH-persian-base-PN-summary | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SH-persian-base-parsinlu-multiple-choice | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SH-persian-base-parsinlu-qqp | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SH-persian-base-parsinlu-sentiment-food | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SH-persian-base-parsinlu-sentiment-movie | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SH-persian-base-parsinlu-textual-entailment | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SH-persian-base-perkey-summary | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SH-persian-base-perkey-title | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SH-persian-base-tebyan | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SH-persian-base-voa-title | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SH-persian-base-wiki-summary | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
null | null | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SH-persian-base | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SS-100-persian-base-PN-summary | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SS-100-persian-base-parsinlu-multiple-choice | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SS-100-persian-base-parsinlu-qqp | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SS-100-persian-base-parsinlu-sentiment-food | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SS-100-persian-base-parsinlu-sentiment-movie | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SS-100-persian-base-parsinlu-textual-entailment | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SS-100-persian-base-perkey-summary | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SS-100-persian-base-perkey-title | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SS-100-persian-base-tebyan | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SS-100-persian-base-voa-title | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SS-100-persian-base-wiki-summary | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SS-100-persian-base | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SS-80-persian-base-PN-summary | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SS-80-persian-base-parsinlu-multiple-choice | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SS-80-persian-base-parsinlu-qqp | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SS-80-persian-base-parsinlu-sentiment-food | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SS-80-persian-base-parsinlu-sentiment-movie | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SS-80-persian-base-parsinlu-textual-entailment | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SS-80-persian-base-perkey-summary | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SS-80-persian-base-perkey-title | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SS-80-persian-base-tebyan | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SS-80-persian-base-voa-title | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SS-80-persian-base-wiki-summary | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/ARMAN-SS-80-persian-base | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/PEGASUS-persian-base-PN-summary | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"has_space",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/PEGASUS-persian-base-parsinlu-multiple-choice | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/PEGASUS-persian-base-parsinlu-qqp | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/PEGASUS-persian-base-parsinlu-sentiment-food | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"has_space",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/PEGASUS-persian-base-parsinlu-sentiment-movie | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/PEGASUS-persian-base-parsinlu-textual-entailment | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/PEGASUS-persian-base-perkey-summary | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/PEGASUS-persian-base-perkey-title | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/PEGASUS-persian-base-tebyan | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/PEGASUS-persian-base-voa-title | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/PEGASUS-persian-base-wiki-summary | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/PEGASUS-persian-base | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/TRANSFORMER-persian-base-PN-summary | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/TRANSFORMER-persian-base-perkey-summary | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/TRANSFORMER-persian-base-perkey-title | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/TRANSFORMER-persian-base-tebyan | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/TRANSFORMER-persian-base-voa-title | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers | More information about models is available [here](https://github.com/alirezasalemi7/ARMAN). | {} | alireza7/TRANSFORMER-persian-base-wiki-summary | null | [
"transformers",
"pytorch",
"pegasus",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text-generation | transformers |
# A conversational model based on the character of Sheldon Cooper from The Big Bang Theory. | {"tags": ["conversational"]} | alistair7/bbt-diagpt2-model | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"conversational",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
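The card above describes a conversational GPT-2 model but gives no usage code. A hedged sketch that follows the common DialoGPT turn convention (user utterance followed by the EOS token); the turn format is an assumption, since the card does not document its input format:
```python
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("alistair7/bbt-diagpt2-model")
model = AutoModelForCausalLM.from_pretrained("alistair7/bbt-diagpt2-model")

# One dialogue turn: encode the user utterance plus EOS, then decode only the reply.
input_ids = tokenizer.encode("Hi Sheldon, how are you?" + tokenizer.eos_token, return_tensors="pt")
reply_ids = model.generate(input_ids, max_length=100, pad_token_id=tokenizer.eos_token_id)
print(tokenizer.decode(reply_ids[0, input_ids.shape[-1]:], skip_special_tokens=True))
```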
question-answering | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# bert-base-uncased-pretrain-finetuned-coqa-falt
This model is a fine-tuned version of [alistvt/bert-base-uncased-pretrained-mlm-coqa-stories](https://huggingface.co/alistvt/bert-base-uncased-pretrained-mlm-coqa-stories) on an unspecified dataset.
It achieves the following results on the evaluation set:
- Loss: 2.8125
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training (see the `TrainingArguments` sketch after this list):
- learning_rate: 1e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 4
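As a hedged illustration, the values above map onto `TrainingArguments` as follows (the original training script is not shown in this card, so this is a sketch of the standard `transformers` configuration, not the authors' exact code):
```python
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="bert-base-uncased-pretrain-finetuned-coqa-falt",  # illustrative output path
    learning_rate=1e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=4,
    # Adam with betas=(0.9, 0.999) and epsilon=1e-08 is the Trainer's default optimizer.
)
```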
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 3.4039 | 0.29 | 2000 | 3.0921 |
| 3.1438 | 0.59 | 4000 | 2.8826 |
| 3.0252 | 0.88 | 6000 | 2.7885 |
| 2.7112 | 1.18 | 8000 | 2.7720 |
| 2.6703 | 1.47 | 10000 | 2.7581 |
| 2.6432 | 1.77 | 12000 | 2.7316 |
| 2.385 | 2.06 | 14000 | 2.7798 |
| 2.3314 | 2.36 | 16000 | 2.7836 |
| 2.3433 | 2.65 | 18000 | 2.7650 |
| 2.3604 | 2.95 | 20000 | 2.7585 |
| 2.2232 | 3.24 | 22000 | 2.8120 |
| 2.2094 | 3.53 | 24000 | 2.7945 |
| 2.2306 | 3.83 | 26000 | 2.8125 |
### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.0+cu111
- Datasets 1.18.0
- Tokenizers 0.10.3
| {"tags": ["generated_from_trainer"], "model-index": [{"name": "bert-base-uncased-pretrain-finetuned-coqa-falt", "results": []}]} | alistvt/bert-base-uncased-pretrain-finetuned-coqa-falt | null | [
"transformers",
"pytorch",
"tensorboard",
"bert",
"question-answering",
"generated_from_trainer",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
question-answering | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# bert-base-uncased-pretrain-finetuned-coqa-falttened
This model is a fine-tuned version of [alistvt/bert-base-uncased-pretrained-mlm-coqa-stories](https://huggingface.co/alistvt/bert-base-uncased-pretrained-mlm-coqa-stories) on an unspecified dataset.
It achieves the following results on the evaluation set:
- Loss: 2.8655
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 3.2886 | 0.29 | 2000 | 3.0142 |
| 3.0801 | 0.59 | 4000 | 2.8347 |
| 2.9744 | 0.88 | 6000 | 2.7643 |
| 2.494 | 1.18 | 8000 | 2.7605 |
| 2.4417 | 1.47 | 10000 | 2.7790 |
| 2.4042 | 1.77 | 12000 | 2.7382 |
| 2.1285 | 2.06 | 14000 | 2.8588 |
| 2.0569 | 2.36 | 16000 | 2.8937 |
| 2.0794 | 2.65 | 18000 | 2.8511 |
| 2.0679 | 2.95 | 20000 | 2.8655 |
### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.0+cu111
- Datasets 1.17.0
- Tokenizers 0.10.3
| {"tags": ["generated_from_trainer"], "model-index": [{"name": "bert-base-uncased-pretrain-finetuned-coqa-falttened", "results": []}]} | alistvt/bert-base-uncased-pretrain-finetuned-coqa-falttened | null | [
"transformers",
"pytorch",
"tensorboard",
"bert",
"question-answering",
"generated_from_trainer",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text-generation | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# bert-base-uncased-pretrained-clm-coqa-stories
This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on an unspecified dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0002
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 0.0201 | 1.0 | 2479 | 0.0018 |
| 0.0033 | 2.0 | 4958 | 0.0003 |
| 0.0014 | 3.0 | 7437 | 0.0002 |
### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.0+cu111
- Datasets 1.17.0
- Tokenizers 0.10.3
| {"license": "apache-2.0", "tags": ["generated_from_trainer"], "model-index": [{"name": "bert-base-uncased-pretrained-clm-coqa-stories", "results": []}]} | alistvt/bert-base-uncased-pretrained-clm-coqa-stories | null | [
"transformers",
"pytorch",
"tensorboard",
"bert",
"text-generation",
"generated_from_trainer",
"license:apache-2.0",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
null | null | {} | alistvt/bert-base-uncased-pretrained-mlm-coqa-stories-pretrain-finetuned-coqa-falttened | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
fill-mask | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# bert-base-uncased-pretrained-mlm-coqa-stories
This model was trained from scratch on an unspecified dataset.
It achieves the following results on the evaluation set:
- Loss: 1.8310
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 2.0573 | 1.0 | 2479 | 1.8805 |
| 1.9517 | 2.0 | 4958 | 1.8377 |
| 1.9048 | 3.0 | 7437 | 1.8310 |
### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.0+cu111
- Datasets 1.17.0
- Tokenizers 0.10.3
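### Example usage
Since this checkpoint is tagged `fill-mask`, it can be queried with the standard pipeline. A minimal hedged sketch (the example sentence is illustrative, not from the card):
```python
from transformers import pipeline

# BERT-style checkpoints use the [MASK] placeholder token.
fill = pipeline("fill-mask", model="alistvt/bert-base-uncased-pretrained-mlm-coqa-stories")
print(fill("The detective asked one more [MASK]."))
```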
| {"tags": ["generated_from_trainer"], "model-index": [{"name": "bert-base-uncased-pretrained-mlm-coqa-stories", "results": []}]} | alistvt/bert-base-uncased-pretrained-mlm-coqa-stories | null | [
"transformers",
"pytorch",
"tensorboard",
"bert",
"fill-mask",
"generated_from_trainer",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
null | null | {} | alistvt/output | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text-classification | transformers | {} | alk/distilbert-base-uncased-finetuned-emotion | null | [
"transformers",
"pytorch",
"tensorboard",
"distilbert",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
feature-extraction | transformers |
# HerBERT
**[HerBERT](https://en.wikipedia.org/wiki/Zbigniew_Herbert)** is a BERT-based Language Model trained on Polish corpora
using Masked Language Modelling (MLM) and Sentence Structural Objective (SSO) with dynamic masking of whole words. For more details, please refer to: [HerBERT: Efficiently Pretrained Transformer-based Language Model for Polish](https://www.aclweb.org/anthology/2021.bsnlp-1.1/).
Model training and experiments were conducted with [transformers](https://github.com/huggingface/transformers) in version 2.9.
## Corpus
HerBERT was trained on six different corpora available for the Polish language:
| Corpus | Tokens | Documents |
| :------ | ------: | ------: |
| [CCNet Middle](https://github.com/facebookresearch/cc_net) | 3243M | 7.9M |
| [CCNet Head](https://github.com/facebookresearch/cc_net) | 2641M | 7.0M |
| [National Corpus of Polish](http://nkjp.pl/index.php?page=14&lang=1)| 1357M | 3.9M |
| [Open Subtitles](http://opus.nlpl.eu/OpenSubtitles-v2018.php) | 1056M | 1.1M |
| [Wikipedia](https://dumps.wikimedia.org/) | 260M | 1.4M |
| [Wolne Lektury](https://wolnelektury.pl/) | 41M | 5.5k |
## Tokenizer
The training dataset was tokenized into subwords using a character-level byte-pair encoding (``CharBPETokenizer``) with
a vocabulary size of 50k tokens. The tokenizer itself was trained with a [tokenizers](https://github.com/huggingface/tokenizers) library.
We kindly encourage you to use the ``Fast`` version of the tokenizer, namely ``HerbertTokenizerFast``.
## Usage
Example code:
```python
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("allegro/herbert-base-cased")
model = AutoModel.from_pretrained("allegro/herbert-base-cased")
output = model(
**tokenizer.batch_encode_plus(
[
(
"A potem szedł środkiem drogi w kurzawie, bo zamiatał nogami, ślepy dziad prowadzony przez tłustego kundla na sznurku.",
"A potem leciał od lasu chłopak z butelką, ale ten ujrzawszy księdza przy drodze okrążył go z dala i biegł na przełaj pól do karczmy."
)
],
padding='longest',
add_special_tokens=True,
return_tensors='pt'
)
)
```
## License
CC BY 4.0
## Citation
If you use this model, please cite the following paper:
```
@inproceedings{mroczkowski-etal-2021-herbert,
title = "{H}er{BERT}: Efficiently Pretrained Transformer-based Language Model for {P}olish",
author = "Mroczkowski, Robert and
Rybak, Piotr and
Wr{\'o}blewska, Alina and
Gawlik, Ireneusz",
booktitle = "Proceedings of the 8th Workshop on Balto-Slavic Natural Language Processing",
month = apr,
year = "2021",
address = "Kiyv, Ukraine",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2021.bsnlp-1.1",
pages = "1--10",
}
```
## Authors
The model was trained by [**Machine Learning Research Team at Allegro**](https://ml.allegro.tech/) and [**Linguistic Engineering Group at Institute of Computer Science, Polish Academy of Sciences**](http://zil.ipipan.waw.pl/).
You can contact us at: <a href="mailto:[email protected]">[email protected]</a> | {"language": "pl", "license": "cc-by-4.0", "tags": ["herbert"]} | allegro/herbert-base-cased | null | [
"transformers",
"pytorch",
"tf",
"jax",
"bert",
"feature-extraction",
"herbert",
"pl",
"license:cc-by-4.0",
"endpoints_compatible",
"has_space",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
null | transformers |
# HerBERT tokenizer
**[HerBERT](https://en.wikipedia.org/wiki/Zbigniew_Herbert)** tokenizer is a character-level byte-pair encoding tokenizer with a
vocabulary size of 50k tokens. The tokenizer was trained on [Wolne Lektury](https://wolnelektury.pl/) and a publicly available subset of
the [National Corpus of Polish](http://nkjp.pl/index.php?page=14&lang=0) with the [fastBPE](https://github.com/glample/fastBPE) library.
The tokenizer utilizes the `XLMTokenizer` implementation from [transformers](https://github.com/huggingface/transformers).
## Tokenizer usage
The HerBERT tokenizer should be used together with the [HerBERT model](https://huggingface.co/allegro/herbert-klej-cased-v1):
```python
from transformers import XLMTokenizer, RobertaModel
tokenizer = XLMTokenizer.from_pretrained("allegro/herbert-klej-cased-tokenizer-v1")
model = RobertaModel.from_pretrained("allegro/herbert-klej-cased-v1")
encoded_input = tokenizer.encode("Kto ma lepszą sztukę, ma lepszy rząd – to jasne.", return_tensors='pt')
outputs = model(encoded_input)
```
## License
CC BY-SA 4.0
## Citation
If you use this tokenizer, please cite the following paper:
```
@inproceedings{rybak-etal-2020-klej,
title = "{KLEJ}: Comprehensive Benchmark for {P}olish Language Understanding",
author = "Rybak, Piotr and
Mroczkowski, Robert and
Tracz, Janusz and
Gawlik, Ireneusz",
booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
month = jul,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.acl-main.111",
doi = "10.18653/v1/2020.acl-main.111",
pages = "1191--1201",
}
```
## Authors
The tokenizer was created by the **Allegro Machine Learning Research** team.
You can contact us at: <a href="mailto:[email protected]">[email protected]</a>
| {"language": "pl"} | allegro/herbert-klej-cased-tokenizer-v1 | null | [
"transformers",
"xlm",
"pl",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
null | transformers |
# HerBERT
**[HerBERT](https://en.wikipedia.org/wiki/Zbigniew_Herbert)** is a BERT-based language model trained on Polish corpora
using only the MLM objective with dynamic masking of whole words. For more details, please refer to:
[KLEJ: Comprehensive Benchmark for Polish Language Understanding](https://arxiv.org/abs/2005.00630).
## Dataset
The **HerBERT** training dataset is a combination of several publicly available corpora for the Polish language:
| Corpus | Tokens | Texts |
| :------ | ------: | ------: |
| [OSCAR](https://traces1.inria.fr/oscar/)| 6710M | 145M |
| [Open Subtitles](http://opus.nlpl.eu/OpenSubtitles-v2018.php) | 1084M | 1.1M |
| [Wikipedia](https://dumps.wikimedia.org/) | 260M | 1.5M |
| [Wolne Lektury](https://wolnelektury.pl/) | 41M | 5.5k |
| [Allegro Articles](https://allegro.pl/artykuly) | 18M | 33k |
## Tokenizer
The training dataset was tokenized into subwords using the [HerBERT Tokenizer](https://huggingface.co/allegro/herbert-klej-cased-tokenizer-v1), a character-level byte-pair encoding with
a vocabulary size of 50k tokens. The tokenizer itself was trained on [Wolne Lektury](https://wolnelektury.pl/) and a publicly available subset of
the [National Corpus of Polish](http://nkjp.pl/index.php?page=14&lang=0) with the [fastBPE](https://github.com/glample/fastBPE) library.
The tokenizer utilizes the `XLMTokenizer` implementation; for that reason, it should be loaded as `allegro/herbert-klej-cased-tokenizer-v1`.
## HerBERT models summary
| Model | WWM | Cased | Tokenizer | Vocab Size | Batch Size | Train Steps |
| :------ | ------: | ------: | ------: | ------: | ------: | ------: |
| herbert-klej-cased-v1 | YES | YES | BPE | 50K | 570 | 180k |
## Model evaluation
HerBERT was evaluated on the [KLEJ](https://klejbenchmark.com/) benchmark, a publicly available set of nine evaluation tasks for Polish language understanding.
It achieved the best average performance and obtained the best results on three of them.
| Model | Average | NKJP-NER | CDSC-E | CDSC-R | CBD | PolEmo2.0-IN | PolEmo2.0-OUT | DYK | PSC | AR |
| :------ | ------: | ------: | ------: | ------: | ------: | ------: | ------: | ------: | ------: | ------: |
| herbert-klej-cased-v1 | **80.5** | 92.7 | 92.5 | 91.9 | **50.3** | **89.2** | **76.3** | 52.1 | 95.3 | 84.5 |
The full leaderboard is available [online](https://klejbenchmark.com/leaderboard).
## HerBERT usage
Model training and experiments were conducted with [transformers](https://github.com/huggingface/transformers) in version 2.0.
Example code:
```python
from transformers import XLMTokenizer, RobertaModel
tokenizer = XLMTokenizer.from_pretrained("allegro/herbert-klej-cased-tokenizer-v1")
model = RobertaModel.from_pretrained("allegro/herbert-klej-cased-v1")
encoded_input = tokenizer.encode("Kto ma lepszą sztukę, ma lepszy rząd – to jasne.", return_tensors='pt')
outputs = model(encoded_input)
```
HerBERT can also be loaded using `AutoTokenizer` and `AutoModel`:
```python
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("allegro/herbert-klej-cased-tokenizer-v1")
model = AutoModel.from_pretrained("allegro/herbert-klej-cased-v1")
```
## License
CC BY-SA 4.0
## Citation
If you use this model, please cite the following paper:
```
@inproceedings{rybak-etal-2020-klej,
title = "{KLEJ}: Comprehensive Benchmark for {P}olish Language Understanding",
author = "Rybak, Piotr and
Mroczkowski, Robert and
Tracz, Janusz and
Gawlik, Ireneusz",
booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
month = jul,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.acl-main.111",
doi = "10.18653/v1/2020.acl-main.111",
pages = "1191--1201",
}
```
## Authors
The model was trained by the **Allegro Machine Learning Research** team.
You can contact us at: <a href="mailto:[email protected]">[email protected]</a>
| {"language": "pl"} | allegro/herbert-klej-cased-v1 | null | [
"transformers",
"pytorch",
"jax",
"roberta",
"pl",
"arxiv:2005.00630",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
feature-extraction | transformers |
# HerBERT
**[HerBERT](https://en.wikipedia.org/wiki/Zbigniew_Herbert)** is a BERT-based Language Model trained on Polish corpora
using Masked Language Modelling (MLM) and Sentence Structural Objective (SSO) with dynamic masking of whole words. For more details, please refer to: [HerBERT: Efficiently Pretrained Transformer-based Language Model for Polish](https://www.aclweb.org/anthology/2021.bsnlp-1.1/).
Model training and experiments were conducted with [transformers](https://github.com/huggingface/transformers) in version 2.9.
## Corpus
HerBERT was trained on six different corpora available for the Polish language:
| Corpus | Tokens | Documents |
| :------ | ------: | ------: |
| [CCNet Middle](https://github.com/facebookresearch/cc_net) | 3243M | 7.9M |
| [CCNet Head](https://github.com/facebookresearch/cc_net) | 2641M | 7.0M |
| [National Corpus of Polish](http://nkjp.pl/index.php?page=14&lang=1)| 1357M | 3.9M |
| [Open Subtitles](http://opus.nlpl.eu/OpenSubtitles-v2018.php) | 1056M | 1.1M |
| [Wikipedia](https://dumps.wikimedia.org/) | 260M | 1.4M |
| [Wolne Lektury](https://wolnelektury.pl/) | 41M | 5.5k |
## Tokenizer
The training dataset was tokenized into subwords using a character-level byte-pair encoding (``CharBPETokenizer``) with
a vocabulary size of 50k tokens. The tokenizer itself was trained with a [tokenizers](https://github.com/huggingface/tokenizers) library.
We kindly encourage you to use the ``Fast`` version of the tokenizer, namely ``HerbertTokenizerFast``.
## Usage
Example code:
```python
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("allegro/herbert-large-cased")
model = AutoModel.from_pretrained("allegro/herbert-large-cased")
output = model(
**tokenizer.batch_encode_plus(
[
(
"A potem szedł środkiem drogi w kurzawie, bo zamiatał nogami, ślepy dziad prowadzony przez tłustego kundla na sznurku.",
"A potem leciał od lasu chłopak z butelką, ale ten ujrzawszy księdza przy drodze okrążył go z dala i biegł na przełaj pól do karczmy."
)
],
padding='longest',
add_special_tokens=True,
return_tensors='pt'
)
)
```
## License
CC BY 4.0
## Citation
If you use this model, please cite the following paper:
```
@inproceedings{mroczkowski-etal-2021-herbert,
title = "{H}er{BERT}: Efficiently Pretrained Transformer-based Language Model for {P}olish",
author = "Mroczkowski, Robert and
Rybak, Piotr and
Wr{\'o}blewska, Alina and
Gawlik, Ireneusz",
booktitle = "Proceedings of the 8th Workshop on Balto-Slavic Natural Language Processing",
month = apr,
year = "2021",
address = "Kiyv, Ukraine",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2021.bsnlp-1.1",
pages = "1--10",
}
```
## Authors
The model was trained by [**Machine Learning Research Team at Allegro**](https://ml.allegro.tech/) and [**Linguistic Engineering Group at Institute of Computer Science, Polish Academy of Sciences**](http://zil.ipipan.waw.pl/).
You can contact us at: <a href="mailto:[email protected]">[email protected]</a> | {"language": "pl", "license": "cc-by-4.0", "tags": ["herbert"]} | allegro/herbert-large-cased | null | [
"transformers",
"pytorch",
"tf",
"jax",
"bert",
"feature-extraction",
"herbert",
"pl",
"license:cc-by-4.0",
"endpoints_compatible",
"has_space",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
translation | transformers |
# plT5 Base
**plT5** models are T5-based language models trained on Polish corpora. The models were optimized for the original T5 denoising target.
## Corpus
plT5 was trained on six different corpora available for the Polish language:
| Corpus | Tokens | Documents |
| :------ | ------: | ------: |
| [CCNet Middle](https://github.com/facebookresearch/cc_net) | 3243M | 7.9M |
| [CCNet Head](https://github.com/facebookresearch/cc_net) | 2641M | 7.0M |
| [National Corpus of Polish](http://nkjp.pl/index.php?page=14&lang=1)| 1357M | 3.9M |
| [Open Subtitles](http://opus.nlpl.eu/OpenSubtitles-v2018.php) | 1056M | 1.1M |
| [Wikipedia](https://dumps.wikimedia.org/) | 260M | 1.4M |
| [Wolne Lektury](https://wolnelektury.pl/) | 41M | 5.5k |
## Tokenizer
The training dataset was tokenized into subwords using a SentencePiece unigram model with
a vocabulary size of 50k tokens.
## Usage
Example code:
```python
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("allegro/plt5-base")
model = AutoModel.from_pretrained("allegro/plt5-base")
```
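The snippet above loads only the bare encoder-decoder for feature extraction. For the tasks this card is tagged with (translation, summarization, question answering), a seq2seq generation head is needed; a hedged sketch with the standard `transformers` API (plT5 was pretrained on the denoising objective only, so it needs task fine-tuning before the generated text is useful, and the prompt below is a placeholder):
```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("allegro/plt5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("allegro/plt5-base")

# Call flow only: the prompt format for a fine-tuned checkpoint is task-specific.
inputs = tokenizer("Przykładowy polski tekst wejściowy.", return_tensors="pt")
outputs = model.generate(**inputs, max_length=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```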
## License
CC BY 4.0
## Citation
If you use this model, please cite the following paper:
```
@article{chrabrowa2022evaluation,
title={Evaluation of Transfer Learning for Polish with a Text-to-Text Model},
author={Chrabrowa, Aleksandra and Dragan, {\L}ukasz and Grzegorczyk, Karol and Kajtoch, Dariusz and Koszowski, Miko{\l}aj and Mroczkowski, Robert and Rybak, Piotr},
journal={arXiv preprint arXiv:2205.08808},
year={2022}
}
```
## Authors
The model was trained by [**Machine Learning Research Team at Allegro**](https://ml.allegro.tech/) and [**Linguistic Engineering Group at Institute of Computer Science, Polish Academy of Sciences**](http://zil.ipipan.waw.pl/).
You can contact us at: <a href="mailto:[email protected]">[email protected]</a> | {"language": "pl", "license": "cc-by-4.0", "tags": ["T5", "translation", "summarization", "question answering", "reading comprehension"], "datasets": ["ccnet", "nkjp", "wikipedia", "open subtitles", "free readings"]} | allegro/plt5-base | null | [
"transformers",
"pytorch",
"t5",
"text2text-generation",
"T5",
"translation",
"summarization",
"question answering",
"reading comprehension",
"pl",
"license:cc-by-4.0",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
translation | transformers |
# plT5 Large
**plT5** models are T5-based language models trained on Polish corpora. The models were optimized for the original T5 denoising target.
## Corpus
plT5 was trained on six different corpora available for the Polish language:
| Corpus | Tokens | Documents |
| :------ | ------: | ------: |
| [CCNet Middle](https://github.com/facebookresearch/cc_net) | 3243M | 7.9M |
| [CCNet Head](https://github.com/facebookresearch/cc_net) | 2641M | 7.0M |
| [National Corpus of Polish](http://nkjp.pl/index.php?page=14&lang=1)| 1357M | 3.9M |
| [Open Subtitles](http://opus.nlpl.eu/OpenSubtitles-v2018.php) | 1056M | 1.1M |
| [Wikipedia](https://dumps.wikimedia.org/) | 260M | 1.4M |
| [Wolne Lektury](https://wolnelektury.pl/) | 41M | 5.5k |
## Tokenizer
The training dataset was tokenized into subwords using a SentencePiece unigram model with
a vocabulary size of 50k tokens.
## Usage
Example code:
```python
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("allegro/plt5-large")
model = AutoModel.from_pretrained("allegro/plt5-large")
```
## License
CC BY 4.0
## Citation
If you use this model, please cite the following paper:
```
@article{chrabrowa2022evaluation,
title={Evaluation of Transfer Learning for Polish with a Text-to-Text Model},
author={Chrabrowa, Aleksandra and Dragan, {\L}ukasz and Grzegorczyk, Karol and Kajtoch, Dariusz and Koszowski, Miko{\l}aj and Mroczkowski, Robert and Rybak, Piotr},
journal={arXiv preprint arXiv:2205.08808},
year={2022}
}
```
## Authors
The model was trained by [**Machine Learning Research Team at Allegro**](https://ml.allegro.tech/) and [**Linguistic Engineering Group at Institute of Computer Science, Polish Academy of Sciences**](http://zil.ipipan.waw.pl/).
You can contact us at: <a href="mailto:[email protected]">[email protected]</a> | {"language": "pl", "license": "cc-by-4.0", "tags": ["T5", "translation", "summarization", "question answering", "reading comprehension"], "datasets": ["ccnet", "nkjp", "wikipedia", "open subtitles", "free readings"]} | allegro/plt5-large | null | [
"transformers",
"pytorch",
"t5",
"text2text-generation",
"T5",
"translation",
"summarization",
"question answering",
"reading comprehension",
"pl",
"license:cc-by-4.0",
"autotrain_compatible",
"endpoints_compatible",
"has_space",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
translation | transformers |
# plT5 Small
**plT5** models are T5-based language models trained on Polish corpora. The models were optimized for the original T5 denoising target.
## Corpus
plT5 was trained on six different corpora available for the Polish language:
| Corpus | Tokens | Documents |
| :------ | ------: | ------: |
| [CCNet Middle](https://github.com/facebookresearch/cc_net) | 3243M | 7.9M |
| [CCNet Head](https://github.com/facebookresearch/cc_net) | 2641M | 7.0M |
| [National Corpus of Polish](http://nkjp.pl/index.php?page=14&lang=1)| 1357M | 3.9M |
| [Open Subtitles](http://opus.nlpl.eu/OpenSubtitles-v2018.php) | 1056M | 1.1M |
| [Wikipedia](https://dumps.wikimedia.org/) | 260M | 1.4M |
| [Wolne Lektury](https://wolnelektury.pl/) | 41M | 5.5k |
## Tokenizer
The training dataset was tokenized into subwords using a sentencepiece unigram model with
vocabulary size of 50k tokens.
## Usage
Example code:
```python
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("allegro/plt5-small")
model = AutoModel.from_pretrained("allegro/plt5-small")
```
## License
CC BY 4.0
## Citation
If you use this model, please cite the following paper:
```
@article{chrabrowa2022evaluation,
title={Evaluation of Transfer Learning for Polish with a Text-to-Text Model},
author={Chrabrowa, Aleksandra and Dragan, {\L}ukasz and Grzegorczyk, Karol and Kajtoch, Dariusz and Koszowski, Miko{\l}aj and Mroczkowski, Robert and Rybak, Piotr},
journal={arXiv preprint arXiv:2205.08808},
year={2022}
}
```
## Authors
The model was trained by [**Machine Learning Research Team at Allegro**](https://ml.allegro.tech/) and [**Linguistic Engineering Group at Institute of Computer Science, Polish Academy of Sciences**](http://zil.ipipan.waw.pl/).
You can contact us at: <a href="mailto:[email protected]">[email protected]</a> | {"language": "pl", "license": "cc-by-4.0", "tags": ["T5", "translation", "summarization", "question answering", "reading comprehension"], "datasets": ["ccnet", "nkjp", "wikipedia", "open subtitles", "free readings"]} | allegro/plt5-small | null | [
"transformers",
"pytorch",
"t5",
"text2text-generation",
"T5",
"translation",
"summarization",
"question answering",
"reading comprehension",
"pl",
"license:cc-by-4.0",
"autotrain_compatible",
"endpoints_compatible",
"has_space",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
question-answering | allennlp |
This is an implementation of the BiDAF model with ELMo embeddings. The basic layout is pretty simple: encode words as a combination of word embeddings and a character-level encoder, pass the word representations through a bi-LSTM/GRU, use a matrix of attentions to put question information into the passage word representations (this is the only part that is at all non-standard), pass this through another few layers of bi-LSTMs/GRUs, and do a softmax over span start and span end.
CAVEATS:
------
This model is based on ELMo. ELMo is not deterministic, meaning that you will see slight differences every time you run it. Also, ELMo likes to be warmed up, so we recommend processing dummy input before processing real workloads with it. | {"language": "en", "tags": ["allennlp", "question-answering"]} | allenai/bidaf-elmo | null | [
"allennlp",
"tensorboard",
"question-answering",
"en",
"has_space",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
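The card above describes the BiDAF-ELMo architecture but includes no usage code. A hedged AllenNLP sketch: `Predictor.from_path` is the standard loading entry point, but the archive URL below is a placeholder assumption; check the model page for the current artifact location.
```python
from allennlp.predictors.predictor import Predictor  # requires allennlp and allennlp-models

# Placeholder URL: substitute the current bidaf-elmo archive from the model page.
predictor = Predictor.from_path(
    "https://storage.googleapis.com/allennlp-public-models/bidaf-elmo-model.tar.gz"
)
result = predictor.predict(
    question="When was the film released?",
    passage="The Matrix is a 1999 science fiction action film.",
)
print(result["best_span_str"])  # highest-scoring answer span in the passage
```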
question-answering | allennlp |
This is an implementation of the BiDAF model with GloVe embeddings. The basic layout is pretty simple: encode words as a combination of word embeddings and a character-level encoder, pass the word representations through a bi-LSTM/GRU, use a matrix of attentions to put question information into the passage word representations (this is the only part that is at all non-standard), pass this through another few layers of bi-LSTMs/GRUs, and do a softmax over span start and span end. | {"language": "en", "tags": ["allennlp", "question-answering"]} | allenai/bidaf | null | [
"allennlp",
"tensorboard",
"question-answering",
"en",
"has_space",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
null | transformers |
# BioMed-RoBERTa-base
BioMed-RoBERTa-base is a language model based on the RoBERTa-base (Liu et. al, 2019) architecture. We adapt RoBERTa-base to 2.68 million scientific papers from the [Semantic Scholar](https://www.semanticscholar.org) corpus via continued pretraining. This amounts to 7.55B tokens and 47GB of data. We use the full text of the papers in training, not just abstracts.
Specific details of the adaptive pretraining procedure can be found in Gururangan et. al, 2020.
## Evaluation
BioMed-RoBERTa achieves performance competitive with state-of-the-art models on a number of NLP tasks in the biomedical domain (numbers are mean (standard deviation) over 3+ random seeds):
| Task | Task Type | RoBERTa-base | BioMed-RoBERTa-base |
|--------------|---------------------|--------------|---------------------|
| RCT-180K | Text Classification | 86.4 (0.3) | 86.9 (0.2) |
| ChemProt | Relation Extraction | 81.1 (1.1) | 83.0 (0.7) |
| JNLPBA | NER | 74.3 (0.2) | 75.2 (0.1) |
| BC5CDR | NER | 85.6 (0.1) | 87.8 (0.1) |
| NCBI-Disease | NER | 86.6 (0.3) | 87.1 (0.8) |
More evaluations TBD.
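A minimal hedged sketch of loading the checkpoint for downstream fine-tuning with the standard `transformers` API (the classification head and label count are illustrative assumptions; the released checkpoint is the adapted base language model only):
```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("allenai/biomed_roberta_base")
# e.g. for a text-classification task such as RCT-180K in the table above.
model = AutoModelForSequenceClassification.from_pretrained(
    "allenai/biomed_roberta_base", num_labels=2
)
```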
## Citation
If using this model, please cite the following paper:
```bibtex
@inproceedings{domains,
author = {Suchin Gururangan and Ana Marasović and Swabha Swayamdipta and Kyle Lo and Iz Beltagy and Doug Downey and Noah A. Smith},
title = {Don't Stop Pretraining: Adapt Language Models to Domains and Tasks},
year = {2020},
booktitle = {Proceedings of ACL},
}
```
| {"language": "en", "thumbnail": "https://huggingface.co/front/thumbnails/allenai.png"} | allenai/biomed_roberta_base | null | [
"transformers",
"pytorch",
"jax",
"roberta",
"en",
"endpoints_compatible",
"has_space",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
null | transformers | {"language": "en"} | allenai/cs_roberta_base | null | [
"transformers",
"pytorch",
"jax",
"roberta",
"en",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | transformers | {"language": "en"} | allenai/dsp_roberta_base_dapt_biomed_tapt_chemprot_4169 | null | [
"transformers",
"pytorch",
"jax",
"roberta",
"en",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | transformers | {"language": "en"} | allenai/dsp_roberta_base_dapt_biomed_tapt_rct_180K | null | [
"transformers",
"pytorch",
"jax",
"roberta",
"en",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | transformers | {"language": "en"} | allenai/dsp_roberta_base_dapt_biomed_tapt_rct_500 | null | [
"transformers",
"pytorch",
"jax",
"roberta",
"en",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | transformers | {"language": "en"} | allenai/dsp_roberta_base_dapt_cs_tapt_citation_intent_1688 | null | [
"transformers",
"pytorch",
"jax",
"roberta",
"en",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | transformers | {"language": "en"} | allenai/dsp_roberta_base_dapt_cs_tapt_sciie_3219 | null | [
"transformers",
"pytorch",
"jax",
"roberta",
"en",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | transformers | {"language": "en"} | allenai/dsp_roberta_base_dapt_news_tapt_ag_115K | null | [
"transformers",
"pytorch",
"jax",
"roberta",
"en",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | transformers | {"language": "en"} | allenai/dsp_roberta_base_dapt_news_tapt_hyperpartisan_news_5015 | null | [
"transformers",
"pytorch",
"jax",
"roberta",
"en",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | transformers | {"language": "en"} | allenai/dsp_roberta_base_dapt_news_tapt_hyperpartisan_news_515 | null | [
"transformers",
"pytorch",
"jax",
"roberta",
"en",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | transformers | {"language": "en"} | allenai/dsp_roberta_base_dapt_reviews_tapt_amazon_helpfulness_115K | null | [
"transformers",
"pytorch",
"jax",
"roberta",
"en",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | transformers | {"language": "en"} | allenai/dsp_roberta_base_dapt_reviews_tapt_imdb_20000 | null | [
"transformers",
"pytorch",
"jax",
"roberta",
"en",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | transformers | {"language": "en"} | allenai/dsp_roberta_base_dapt_reviews_tapt_imdb_70000 | null | [
"transformers",
"pytorch",
"jax",
"roberta",
"en",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | transformers | {"language": "en"} | allenai/dsp_roberta_base_tapt_ag_115K | null | [
"transformers",
"pytorch",
"jax",
"roberta",
"en",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | transformers | {"language": "en"} | allenai/dsp_roberta_base_tapt_amazon_helpfulness_115K | null | [
"transformers",
"pytorch",
"jax",
"roberta",
"en",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | transformers | {"language": "en"} | allenai/dsp_roberta_base_tapt_chemprot_4169 | null | [
"transformers",
"pytorch",
"jax",
"roberta",
"en",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | transformers | {"language": "en"} | allenai/dsp_roberta_base_tapt_citation_intent_1688 | null | [
"transformers",
"pytorch",
"jax",
"roberta",
"en",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | transformers | {"language": "en"} | allenai/dsp_roberta_base_tapt_hyperpartisan_news_5015 | null | [
"transformers",
"pytorch",
"jax",
"roberta",
"en",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | transformers | {"language": "en"} | allenai/dsp_roberta_base_tapt_hyperpartisan_news_515 | null | [
"transformers",
"pytorch",
"jax",
"roberta",
"en",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | transformers | {"language": "en"} | allenai/dsp_roberta_base_tapt_imdb_20000 | null | [
"transformers",
"pytorch",
"jax",
"roberta",
"en",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | transformers | {"language": "en"} | allenai/dsp_roberta_base_tapt_imdb_70000 | null | [
"transformers",
"pytorch",
"jax",
"roberta",
"en",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | transformers | {"language": "en"} | allenai/dsp_roberta_base_tapt_rct_180K | null | [
"transformers",
"pytorch",
"jax",
"roberta",
"en",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | transformers | {"language": "en"} | allenai/dsp_roberta_base_tapt_rct_500 | null | [
"transformers",
"pytorch",
"jax",
"roberta",
"en",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |