modelId (string, 4–112 chars) | sha (string, 40 chars) | lastModified (string, 24 chars) | tags (list) | pipeline_tag (string, 29 classes) | private (bool, 1 class) | author (string, 2–38 chars, nullable ⌀) | config (null) | id (string, 4–112 chars) | downloads (float64, 0–36.8M, nullable ⌀) | likes (float64, 0–712, nullable ⌀) | library_name (string, 17 classes) | __index_level_0__ (int64, 0–38.5k) | readme (string, 0–186k chars) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
masakhane/m2m100_418M_en_pcm_rel
|
7cf2f14f2ef70f4649f1ae681f71ffacadcee7ac
|
2022-05-10T12:01:29.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_en_pcm_rel
| 0 | null |
transformers
| 37,400 |
---
license: afl-3.0
---
|
masakhane/afrimt5_en_swa_news
|
067682312fc9c69f91946f332c97a14fe1917d86
|
2022-05-10T13:50:24.000Z
|
[
"pytorch",
"mt5",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/afrimt5_en_swa_news
| 0 | null |
transformers
| 37,401 |
---
license: afl-3.0
---
|
masakhane/afrimt5_swa_en_news
|
3105a5405e9bc898eaaa7428ffa1e0d55bd9ce3f
|
2022-05-10T13:50:27.000Z
|
[
"pytorch",
"mt5",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/afrimt5_swa_en_news
| 0 | null |
transformers
| 37,402 |
---
license: afl-3.0
---
|
masakhane/afrimbart_swa_en_news
|
7bc9ac0993cc97fa5f86f340197f74c26f992a22
|
2022-05-10T13:50:29.000Z
|
[
"pytorch",
"mbart",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/afrimbart_swa_en_news
| 0 | null |
transformers
| 37,403 |
---
license: afl-3.0
---
|
masakhane/afrimbart_en_swa_news
|
3f45d397e7a8bdb67d8e0264220cc0fe6e21d3b5
|
2022-05-10T13:50:32.000Z
|
[
"pytorch",
"mbart",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/afrimbart_en_swa_news
| 0 | null |
transformers
| 37,404 |
---
license: afl-3.0
---
|
masakhane/afribyt5_swa_en_news
|
6e2b1535266fc3ac2a0c0261cd2921a34325867c
|
2022-05-10T14:00:13.000Z
|
[
"pytorch",
"t5",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/afribyt5_swa_en_news
| 0 | null |
transformers
| 37,405 |
---
license: afl-3.0
---
|
masakhane/afribyt5_en_swa_news
|
57b14166192dd8d307681ec5127be7728e7f9b2d
|
2022-05-10T14:00:11.000Z
|
[
"pytorch",
"t5",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/afribyt5_en_swa_news
| 0 | null |
transformers
| 37,406 |
---
license: afl-3.0
---
|
masakhane/byt5_en_swa_news
|
6034c53957ad712400087940547f71d604904ac8
|
2022-05-10T14:00:17.000Z
|
[
"pytorch",
"t5",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/byt5_en_swa_news
| 0 | null |
transformers
| 37,407 |
---
license: afl-3.0
---
|
masakhane/byt5_swa_en_news
|
9a48ae2c054b542393f6e5b478c837ba3c83af4d
|
2022-05-10T14:00:15.000Z
|
[
"pytorch",
"t5",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/byt5_swa_en_news
| 0 | null |
transformers
| 37,408 |
---
license: afl-3.0
---
|
masakhane/mt5_swa_en_news
|
3ca8a182b1910a78f541acf3c43f1823927b0b84
|
2022-05-10T14:10:04.000Z
|
[
"pytorch",
"mt5",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/mt5_swa_en_news
| 0 | null |
transformers
| 37,409 |
---
license: afl-3.0
---
|
masakhane/mt5_en_swa_news
|
2cf99d7bf6259ff8a488dda649298f25c87c76dc
|
2022-05-10T14:10:00.000Z
|
[
"pytorch",
"mt5",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/mt5_en_swa_news
| 0 | null |
transformers
| 37,410 |
---
license: afl-3.0
---
|
masakhane/mbart50_en_swa_news
|
23648ee40b089a349df43ae72be0d777a45954df
|
2022-05-10T14:10:02.000Z
|
[
"pytorch",
"mbart",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/mbart50_en_swa_news
| 0 | null |
transformers
| 37,411 |
---
license: afl-3.0
---
|
masakhane/mbart50_swa_en_news
|
c5796515fba2685b61357c59cd857c36ce39cd60
|
2022-05-10T14:10:06.000Z
|
[
"pytorch",
"mbart",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/mbart50_swa_en_news
| 0 | null |
transformers
| 37,412 |
---
license: afl-3.0
---
|
masakhane/m2m100_418M_en_swa_news
|
89b47d51eb0bba6d6da14367dbc9179b044172b2
|
2022-05-10T14:24:38.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_en_swa_news
| 0 | null |
transformers
| 37,413 |
---
license: afl-3.0
---
|
masakhane/m2m100_418M_swa_en_news
|
4b1b6c23b28cfcb07dc0914fbe213c38b2a5f394
|
2022-05-10T14:24:49.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_swa_en_news
| 0 | null |
transformers
| 37,414 |
---
license: afl-3.0
---
|
masakhane/m2m100_418M_swa_en_rel_news
|
f59c1deea4c4faf9896fe809e45fe5a8130f9094
|
2022-05-10T14:24:41.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_swa_en_rel_news
| 0 | null |
transformers
| 37,415 |
---
license: afl-3.0
---
|
masakhane/m2m100_418M_en_swa_rel_news_ft
|
7cd1397e1f1eb710d1594117f1e698b5a123f9d0
|
2022-05-10T14:34:00.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_en_swa_rel_news_ft
| 0 | null |
transformers
| 37,416 |
---
license: afl-3.0
---
|
masakhane/m2m100_418M_en_swa_rel_ft
|
9a0844fff22a24a377679b1dcefb61a36b213df7
|
2022-05-10T14:34:18.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_en_swa_rel_ft
| 0 | null |
transformers
| 37,417 |
---
license: afl-3.0
---
|
masakhane/m2m100_418M_swa_en_rel_ft
|
eae203e3d1365d056662d817e689e55f37b1310d
|
2022-05-10T14:34:28.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_swa_en_rel_ft
| 0 | null |
transformers
| 37,418 |
---
license: afl-3.0
---
|
masakhane/m2m100_418M_swa_en_rel
|
62b6276422666842a4c1ef59ebc0f1c7b5124ae1
|
2022-05-10T14:40:03.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_swa_en_rel
| 0 | null |
transformers
| 37,419 |
---
license: afl-3.0
---
|
masakhane/m2m100_418M_en_swa_rel
|
fe3034124eef4919a355b3e345b3a99d88763c97
|
2022-05-10T14:40:06.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_en_swa_rel
| 0 | null |
transformers
| 37,420 |
---
license: afl-3.0
---
|
huggingtweets/_avichalp_
|
ed2ee013c5457309edb25a266e8a070a904bb6b1
|
2022-05-10T11:56:46.000Z
|
[
"pytorch",
"gpt2",
"text-generation",
"en",
"transformers",
"huggingtweets"
] |
text-generation
| false |
huggingtweets
| null |
huggingtweets/_avichalp_
| 0 | null |
transformers
| 37,421 |
---
language: en
thumbnail: http://www.huggingtweets.com/_avichalp_/1652183801632/predictions.png
tags:
- huggingtweets
widget:
- text: "My dream is"
---
<div class="inline-flex flex-col" style="line-height: 1.5;">
<div class="flex">
<div
style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('https://pbs.twimg.com/profile_images/1472922431396331520/eqT17_QF_400x400.jpg')">
</div>
<div
style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('')">
</div>
<div
style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('')">
</div>
</div>
<div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div>
<div style="text-align: center; font-size: 16px; font-weight: 800">avi</div>
<div style="text-align: center; font-size: 14px;">@_avichalp_</div>
</div>
I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets).
Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)!
## How does it work?
The model uses the following pipeline.

To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI).
## Training data
The model was trained on tweets from avi.
| Data | avi |
| --- | --- |
| Tweets downloaded | 2625 |
| Retweets | 259 |
| Short tweets | 596 |
| Tweets kept | 1770 |
[Explore the data](https://wandb.ai/wandb/huggingtweets/runs/2wg7ysai/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.
## Training procedure
The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @_avichalp_'s tweets.
Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/3ae6t1qq) for full transparency and reproducibility.
At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/3ae6t1qq/artifacts) is logged and versioned.
## How to use
You can use this model directly with a pipeline for text generation:
```python
from transformers import pipeline
generator = pipeline('text-generation', model='huggingtweets/_avichalp_')
generator("My dream is", num_return_sequences=5)
```
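For finer control over decoding, the checkpoint can also be loaded with the generic auto classes. A minimal sketch (not part of the original card), assuming the repo resolves through the standard `transformers` loaders:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the fine-tuned GPT-2 checkpoint directly instead of via pipeline().
tokenizer = AutoTokenizer.from_pretrained("huggingtweets/_avichalp_")
model = AutoModelForCausalLM.from_pretrained("huggingtweets/_avichalp_")

inputs = tokenizer("My dream is", return_tensors="pt")
# Sample five continuations, mirroring num_return_sequences=5 above.
outputs = model.generate(
    **inputs,
    do_sample=True,
    max_length=50,
    num_return_sequences=5,
    pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token by default
)
for text in tokenizer.batch_decode(outputs, skip_special_tokens=True):
    print(text)
```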
## Limitations and bias
The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias).
In addition, the data present in the user's tweets further affects the text generated by the model.
## About
*Built by Boris Dayma*
[](https://twitter.com/intent/follow?screen_name=borisdayma)
For more details, visit the project repository.
[](https://github.com/borisdayma/huggingtweets)
|
masakhane/afrimt5_en_yor_news
|
2fec7768b8f65eb3f4ad1cae83d05555407875ff
|
2022-05-10T12:41:12.000Z
|
[
"pytorch",
"mt5",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/afrimt5_en_yor_news
| 0 | null |
transformers
| 37,422 |
---
license: afl-3.0
---
|
masakhane/afrimt5_yor_en_news
|
de96f9f96b5ae19b9217983893b85e1aea9e1f01
|
2022-05-10T12:41:16.000Z
|
[
"pytorch",
"mt5",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/afrimt5_yor_en_news
| 0 | null |
transformers
| 37,423 |
---
license: afl-3.0
---
|
masakhane/afrimbart_yor_en_news
|
0f9f19e8ddcb61f1b2b3d0d272c28f7b53e86c58
|
2022-05-10T12:41:19.000Z
|
[
"pytorch",
"mbart",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/afrimbart_yor_en_news
| 0 | null |
transformers
| 37,424 |
---
license: afl-3.0
---
|
masakhane/afrimbart_en_yor_news
|
e8528b2bd82ed2264a4e57a7b079877633710de6
|
2022-05-10T12:41:22.000Z
|
[
"pytorch",
"mbart",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/afrimbart_en_yor_news
| 0 | null |
transformers
| 37,425 |
---
license: afl-3.0
---
|
hadifar/xlm-roberta-base-ft-CSTwitter
|
7d1259adf82dd5a05b72bed93d363d6759925fb1
|
2022-05-10T12:30:32.000Z
|
[
"pytorch",
"xlm-roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
] |
fill-mask
| false |
hadifar
| null |
hadifar/xlm-roberta-base-ft-CSTwitter
| 0 | null |
transformers
| 37,426 |
Entry not found
|
masakhane/afribyt5_yor_en_news
|
79d6db1fa2dbfbfcf49de8b01d28947dfba5aaa1
|
2022-05-10T12:50:05.000Z
|
[
"pytorch",
"t5",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/afribyt5_yor_en_news
| 0 | null |
transformers
| 37,427 |
---
license: afl-3.0
---
|
masakhane/afribyt5_en_yor_news
|
a1de36d562b975ab47a356ea9ecf491186ef42ab
|
2022-05-10T12:50:08.000Z
|
[
"pytorch",
"t5",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/afribyt5_en_yor_news
| 0 | null |
transformers
| 37,428 |
---
license: afl-3.0
---
|
masakhane/byt5_en_yor_news
|
0064c2116c04ad96f8b384581b8fae6d262f2272
|
2022-05-10T12:50:11.000Z
|
[
"pytorch",
"t5",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/byt5_en_yor_news
| 0 | null |
transformers
| 37,429 |
---
license: afl-3.0
---
|
masakhane/mt5_yor_en_news
|
fbafd6ddc984e76536797ea648bcf638161eb887
|
2022-05-10T12:59:14.000Z
|
[
"pytorch",
"mt5",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/mt5_yor_en_news
| 0 | null |
transformers
| 37,430 |
---
license: afl-3.0
---
|
masakhane/mbart50_en_yor_news
|
88e879d20029cbe7722c69a0a6175760adfbe73b
|
2022-05-10T12:59:20.000Z
|
[
"pytorch",
"mbart",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/mbart50_en_yor_news
| 0 | null |
transformers
| 37,431 |
---
license: afl-3.0
---
|
masakhane/mbart50_yor_en_news
|
4124838f1e6481362e4fc40c7369245eb84fffad
|
2022-05-10T12:59:22.000Z
|
[
"pytorch",
"mbart",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/mbart50_yor_en_news
| 0 | null |
transformers
| 37,432 |
---
license: afl-3.0
---
|
masakhane/m2m100_418M_en_yor_news
|
18acf0cd39255cd255c6247ff333ae94bdc6a9af
|
2022-05-10T13:06:54.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_en_yor_news
| 0 | null |
transformers
| 37,433 |
---
license: afl-3.0
---
|
masakhane/m2m100_418M_yor_en_news
|
cbd3995cf1e4cb09866f7bc888c0e083837076c3
|
2022-05-10T13:06:56.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_yor_en_news
| 0 | null |
transformers
| 37,434 |
---
license: afl-3.0
---
|
masakhane/m2m100_418M_yor_en_rel_news
|
f7cdcd921dc01ee5731d37321b4d596f82cda1fe
|
2022-05-10T13:06:51.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_yor_en_rel_news
| 0 | null |
transformers
| 37,435 |
---
license: afl-3.0
---
|
masakhane/m2m100_418M_en_yor_rel_news
|
ff9c7074bd1a35ccd34cdb1361a46c19ee4a4b75
|
2022-05-10T13:06:59.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_en_yor_rel_news
| 0 | null |
transformers
| 37,436 |
---
license: afl-3.0
---
|
masakhane/m2m100_418M_en_yor_rel_news_ft
|
dbab558511e313ebb496b396abce923ea76e3e0c
|
2022-05-10T13:34:33.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_en_yor_rel_news_ft
| 0 | null |
transformers
| 37,437 |
---
license: afl-3.0
---
|
masakhane/m2m100_418M_yor_en_rel_news_ft
|
22b87ae0e99dfaae41bec2d974a09cd4d3bee04a
|
2022-05-10T13:34:38.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_yor_en_rel_news_ft
| 0 | null |
transformers
| 37,438 |
---
license: afl-3.0
---
|
masakhane/m2m100_418M_en_yor_rel_ft
|
24a57cfdbf703de9879c7ced3c26596fdd34e5d9
|
2022-05-10T13:34:36.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_en_yor_rel_ft
| 0 | null |
transformers
| 37,439 |
Entry not found
|
masakhane/m2m100_418M_yor_en_rel_ft
|
81f081b87c2cae1d9b749ccd56898d759162e5c7
|
2022-05-10T13:34:41.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_yor_en_rel_ft
| 0 | null |
transformers
| 37,440 |
---
license: afl-3.0
---
|
masakhane/m2m100_418M_en_yor_rel
|
2b3310280a8d722bca9d5a5a2ffc759309302e12
|
2022-05-10T13:38:28.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_en_yor_rel
| 0 | null |
transformers
| 37,441 |
---
license: afl-3.0
---
|
masakhane/afrimt5_en_tsn_news
|
df3c9a2a1ecd3d8a7efcd461b4305fb4ca4df275
|
2022-05-10T14:50:57.000Z
|
[
"pytorch",
"mt5",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/afrimt5_en_tsn_news
| 0 | null |
transformers
| 37,442 |
---
license: afl-3.0
---
|
masakhane/afrimt5_tsn_en_news
|
df91a8c50e119865f27d279c58fba9f7697e745f
|
2022-05-10T14:51:00.000Z
|
[
"pytorch",
"mt5",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/afrimt5_tsn_en_news
| 0 | null |
transformers
| 37,443 |
---
license: afl-3.0
---
|
masakhane/afrimbart_tsn_en_news
|
a9484b162ca56515904d4e0f25f5e2c52ea79cf3
|
2022-05-10T14:51:02.000Z
|
[
"pytorch",
"mbart",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/afrimbart_tsn_en_news
| 0 | null |
transformers
| 37,444 |
---
license: afl-3.0
---
|
masakhane/afrimbart_en_tsn_news
|
303765b01229dcd37996426839d0d70f7ea9b7d5
|
2022-05-10T14:51:05.000Z
|
[
"pytorch",
"mbart",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/afrimbart_en_tsn_news
| 0 | null |
transformers
| 37,445 |
---
license: afl-3.0
---
|
masakhane/afribyt5_tsn_en_news
|
a822051c38e74058da932cc1c84c424fad05ff40
|
2022-05-10T16:15:00.000Z
|
[
"pytorch",
"t5",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/afribyt5_tsn_en_news
| 0 | null |
transformers
| 37,446 |
---
license: afl-3.0
---
|
masakhane/afribyt5_en_tsn_news
|
c0c6561545344dbaed286dc7e67f5eb3d8dffbd9
|
2022-05-10T16:15:07.000Z
|
[
"pytorch",
"t5",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/afribyt5_en_tsn_news
| 0 | null |
transformers
| 37,447 |
---
license: afl-3.0
---
|
masakhane/byt5_en_tsn_news
|
9185455f4df1a5cfda75abb75d90c36c7c0289a6
|
2022-05-10T16:15:05.000Z
|
[
"pytorch",
"t5",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/byt5_en_tsn_news
| 0 | null |
transformers
| 37,448 |
---
license: afl-3.0
---
|
masakhane/byt5_tsn_en_news
|
748bf3e329e485fe642d4ac2f91d5b1bda841b77
|
2022-05-10T16:15:03.000Z
|
[
"pytorch",
"t5",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/byt5_tsn_en_news
| 0 | null |
transformers
| 37,449 |
---
license: afl-3.0
---
|
masakhane/mt5_tsn_en_news
|
da8cb90d429cd7e0c0611d0c1172ebe9942b1a68
|
2022-05-10T16:25:23.000Z
|
[
"pytorch",
"mt5",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/mt5_tsn_en_news
| 0 | null |
transformers
| 37,450 |
---
license: afl-3.0
---
|
masakhane/mt5_en_tsn_news
|
4fa67ef4ee8ac2baecb7af933bfd59e26d037bba
|
2022-05-10T16:25:21.000Z
|
[
"pytorch",
"mt5",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/mt5_en_tsn_news
| 0 | null |
transformers
| 37,451 |
---
license: afl-3.0
---
|
masakhane/mbart50_en_tsn_news
|
7f1f82bd3f4d4fd7fec0e69d7a975ac371cc22b9
|
2022-05-10T16:25:19.000Z
|
[
"pytorch",
"mbart",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/mbart50_en_tsn_news
| 0 | null |
transformers
| 37,452 |
---
license: afl-3.0
---
|
masakhane/mbart50_tsn_en_news
|
fdd9a61592051d3549daf8d0c39e69e6a24f82cd
|
2022-05-10T16:25:25.000Z
|
[
"pytorch",
"mbart",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/mbart50_tsn_en_news
| 0 | null |
transformers
| 37,453 |
---
license: afl-3.0
---
|
huxxx657/roberta-base-finetuned-scrambled-squad-5
|
44fcc845adbaa9a6c81a69640adbefcaa1dae057
|
2022-05-10T16:43:15.000Z
|
[
"pytorch",
"tensorboard",
"roberta",
"question-answering",
"dataset:squad",
"transformers",
"generated_from_trainer",
"license:mit",
"model-index",
"autotrain_compatible"
] |
question-answering
| false |
huxxx657
| null |
huxxx657/roberta-base-finetuned-scrambled-squad-5
| 0 | null |
transformers
| 37,454 |
---
license: mit
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: roberta-base-finetuned-scrambled-squad-5
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# roberta-base-finetuned-scrambled-squad-5
This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the squad dataset.
It achieves the following results on the evaluation set:
- Loss: 1.7078
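As an illustration only (not part of the generated card), a hedged sketch of extractive QA inference with this checkpoint; the question and context strings are placeholders:

```python
from transformers import pipeline

# Hypothetical usage sketch for this fine-tuned extractive QA checkpoint.
qa = pipeline(
    "question-answering",
    model="huxxx657/roberta-base-finetuned-scrambled-squad-5",
)
result = qa(
    question="What was the model fine-tuned on?",
    context="This model is a fine-tuned version of roberta-base on the squad dataset.",
)
print(result["answer"], result["score"])
```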
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 7e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 1.7695 | 1.0 | 5532 | 1.7078 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.11.0+cu113
- Datasets 2.1.0
- Tokenizers 0.12.1
|
masakhane/m2m100_418M_en_tsn_news
|
ee0dba7751223c74280cb4cca13049bba1c4ec1b
|
2022-05-10T16:32:31.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_en_tsn_news
| 0 | null |
transformers
| 37,455 |
---
license: afl-3.0
---
|
masakhane/m2m100_418M_tsn_en_news
|
d920bad02448c53ea43959de2dca3971873f71c4
|
2022-05-10T16:32:34.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_tsn_en_news
| 0 | null |
transformers
| 37,456 |
---
license: afl-3.0
---
|
masakhane/m2m100_418M_tsn_en_rel_news
|
f9ae74a2263e17c88ebc4cde25902ea139d85622
|
2022-05-10T16:32:41.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_tsn_en_rel_news
| 0 | null |
transformers
| 37,457 |
---
license: afl-3.0
---
|
masakhane/m2m100_418M_en_tsn_rel_news
|
d2ad774e541b0bde8d486b9e3ba65b621fc902fc
|
2022-05-10T16:32:38.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_en_tsn_rel_news
| 0 | null |
transformers
| 37,458 |
---
license: afl-3.0
---
|
masakhane/m2m100_418M_en_tsn_rel_ft
|
fc60cc683e029ad0589781598c8cad3e11c70a14
|
2022-05-10T16:43:42.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_en_tsn_rel_ft
| 0 | null |
transformers
| 37,459 |
---
license: afl-3.0
---
|
masakhane/m2m100_418M_tsn_en_rel_ft
|
454da39ce31c457851916fa2ad32c10077bb6191
|
2022-05-10T16:43:45.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_tsn_en_rel_ft
| 0 | null |
transformers
| 37,460 |
---
license: afl-3.0
---
|
masakhane/m2m100_418M_en_tsn_rel_news_ft
|
fc18956d2f543afe10b103a9903ab986b9a3d018
|
2022-05-10T16:43:37.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_en_tsn_rel_news_ft
| 0 | null |
transformers
| 37,461 |
---
license: afl-3.0
---
|
masakhane/m2m100_418M_tsn_en_rel_news_ft
|
9d8d32051cde29765d73c50b9cf9d45a340a7a2d
|
2022-05-10T16:43:40.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_tsn_en_rel_news_ft
| 0 | null |
transformers
| 37,462 |
---
license: afl-3.0
---
|
masakhane/m2m100_418M_tsn_en_rel
|
42bc944d3d076214f367258c3b8a7d259e2db351
|
2022-05-10T16:50:25.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_tsn_en_rel
| 0 | null |
transformers
| 37,463 |
---
license: afl-3.0
---
|
masakhane/m2m100_418M_en_tsn_rel
|
f761abcdc74e00f0cf9a02c7a50c102b8274fbd6
|
2022-05-10T16:50:27.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_en_tsn_rel
| 0 | null |
transformers
| 37,464 |
---
license: afl-3.0
---
|
husnu/wav2vec2-large-xls-r-300m-turkish-colab_common_voice-8_6
|
894be4059e821f91b73853a7346692699c20b337
|
2022-05-11T01:56:24.000Z
|
[
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"dataset:common_voice",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
] |
automatic-speech-recognition
| false |
husnu
| null |
husnu/wav2vec2-large-xls-r-300m-turkish-colab_common_voice-8_6
| 0 | null |
transformers
| 37,465 |
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- common_voice
model-index:
- name: wav2vec2-large-xls-r-300m-turkish-colab_common_voice-8_6
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xls-r-300m-turkish-colab_common_voice-8_6
This model is a fine-tuned version of [husnu/wav2vec2-large-xls-r-300m-turkish-colab_common_voice-8_5](https://huggingface.co/husnu/wav2vec2-large-xls-r-300m-turkish-colab_common_voice-8_5) on the common_voice dataset.
It achieves the following results on the evaluation set:
- Loss: 0.3646
- Wer: 0.3478
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0003
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- num_epochs: 6
- mixed_precision_training: Native AMP
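These settings map naturally onto `transformers.TrainingArguments`. A hedged sketch of that mapping (not part of the generated card; the output directory name is hypothetical, and "Native AMP" is assumed to correspond to `fp16=True`):

```python
from transformers import TrainingArguments

# Hedged reconstruction of the hyperparameters listed above.
# "wav2vec2-tr-output" is a hypothetical directory name.
training_args = TrainingArguments(
    output_dir="wav2vec2-tr-output",
    learning_rate=3e-4,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=2,  # effective train batch size: 16 * 2 = 32
    warmup_steps=500,
    num_train_epochs=6,
    lr_scheduler_type="linear",
    seed=42,
    fp16=True,  # mixed_precision_training: Native AMP
)
```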
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.1024 | 0.51 | 400 | 0.4030 | 0.4171 |
| 0.1533 | 1.02 | 800 | 0.4733 | 0.4570 |
| 0.1584 | 1.53 | 1200 | 0.4150 | 0.4371 |
| 0.1538 | 2.04 | 1600 | 0.4104 | 0.4390 |
| 0.1395 | 2.55 | 2000 | 0.3891 | 0.4133 |
| 0.1415 | 3.07 | 2400 | 0.3877 | 0.4015 |
| 0.1261 | 3.58 | 2800 | 0.3685 | 0.3899 |
| 0.1149 | 4.09 | 3200 | 0.3791 | 0.3881 |
| 0.1003 | 4.6 | 3600 | 0.3642 | 0.3626 |
| 0.0934 | 5.11 | 4000 | 0.3755 | 0.3516 |
| 0.0805 | 5.62 | 4400 | 0.3646 | 0.3478 |
### Framework versions
- Transformers 4.11.3
- Pytorch 1.10.0+cu113
- Datasets 2.1.0
- Tokenizers 0.10.3
|
theojolliffe/distilbart-cnn-arxiv-pubmed-pubmed-v3-e16
|
7704dd633264ecff26869252d0695ca4e02de462
|
2022-05-11T14:04:06.000Z
|
[
"pytorch",
"tensorboard",
"bart",
"text2text-generation",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] |
text2text-generation
| false |
theojolliffe
| null |
theojolliffe/distilbart-cnn-arxiv-pubmed-pubmed-v3-e16
| 0 | null |
transformers
| 37,466 |
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- rouge
model-index:
- name: distilbart-cnn-arxiv-pubmed-pubmed-v3-e16
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbart-cnn-arxiv-pubmed-pubmed-v3-e16
This model is a fine-tuned version of [theojolliffe/distilbart-cnn-arxiv-pubmed-pubmed](https://huggingface.co/theojolliffe/distilbart-cnn-arxiv-pubmed-pubmed) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.8306
- Rouge1: 56.4519
- Rouge2: 41.6818
- Rougel: 44.7833
- Rougelsum: 54.6359
- Gen Len: 141.9815
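As an illustration only, a hedged sketch of summarization inference with this checkpoint via the standard `transformers` pipeline; the input text is a placeholder:

```python
from transformers import pipeline

# Hypothetical inference sketch for this distilbart summarization checkpoint.
summarizer = pipeline(
    "summarization",
    model="theojolliffe/distilbart-cnn-arxiv-pubmed-pubmed-v3-e16",
)
long_document = "Replace this placeholder with a long document to summarize."
print(summarizer(long_document, max_length=142, min_length=20)[0]["summary_text"])
```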
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 16
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:--------:|
| No log | 1.0 | 398 | 1.1157 | 50.9487 | 31.3005 | 34.0145 | 48.6057 | 141.8519 |
| 1.3569 | 2.0 | 796 | 0.9688 | 53.0653 | 34.1855 | 37.0759 | 50.5942 | 141.2963 |
| 0.8704 | 3.0 | 1194 | 0.9053 | 53.9684 | 36.0388 | 38.6674 | 51.9604 | 142.0 |
| 0.6287 | 4.0 | 1592 | 0.8515 | 54.2379 | 36.4915 | 39.1393 | 51.6991 | 141.4074 |
| 0.6287 | 5.0 | 1990 | 0.8274 | 53.6806 | 34.8373 | 37.7369 | 51.239 | 141.6481 |
| 0.465 | 6.0 | 2388 | 0.8486 | 55.2534 | 39.1757 | 41.6366 | 53.2989 | 141.9259 |
| 0.3432 | 7.0 | 2786 | 0.8116 | 54.539 | 37.6314 | 40.5531 | 52.1997 | 141.3889 |
| 0.2577 | 8.0 | 3184 | 0.7976 | 54.8212 | 36.8347 | 40.6768 | 52.7785 | 142.0 |
| 0.204 | 9.0 | 3582 | 0.8010 | 53.9302 | 37.3523 | 40.135 | 52.139 | 141.7778 |
| 0.204 | 10.0 | 3980 | 0.8168 | 54.3151 | 38.0665 | 42.4112 | 52.4682 | 142.0 |
| 0.1663 | 11.0 | 4378 | 0.8171 | 54.7027 | 38.3117 | 42.0196 | 52.8821 | 142.0 |
| 0.135 | 12.0 | 4776 | 0.8202 | 54.1035 | 37.9154 | 40.7676 | 52.2509 | 142.0 |
| 0.1102 | 13.0 | 5174 | 0.8204 | 56.223 | 41.0947 | 44.0131 | 54.3353 | 142.0 |
| 0.0928 | 14.0 | 5572 | 0.8280 | 56.1637 | 41.0408 | 44.2931 | 54.5488 | 142.0 |
| 0.0928 | 15.0 | 5970 | 0.8273 | 56.2608 | 41.3855 | 44.4432 | 54.5778 | 142.0 |
| 0.0847 | 16.0 | 6368 | 0.8306 | 56.4519 | 41.6818 | 44.7833 | 54.6359 | 141.9815 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.11.0+cu113
- Datasets 2.2.0
- Tokenizers 0.12.1
|
huxxx657/roberta-base-finetuned-scrambled-squad-15
|
24ffd28c0a60a8201f41739591c90f9a92b1e9c5
|
2022-05-10T21:13:58.000Z
|
[
"pytorch",
"tensorboard",
"roberta",
"question-answering",
"dataset:squad",
"transformers",
"generated_from_trainer",
"license:mit",
"model-index",
"autotrain_compatible"
] |
question-answering
| false |
huxxx657
| null |
huxxx657/roberta-base-finetuned-scrambled-squad-15
| 0 | null |
transformers
| 37,467 |
---
license: mit
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: roberta-base-finetuned-scrambled-squad-15
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# roberta-base-finetuned-scrambled-squad-15
This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the squad dataset.
It achieves the following results on the evaluation set:
- Loss: 1.8722
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 7e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 1.8944 | 1.0 | 5590 | 1.8722 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.11.0+cu113
- Datasets 2.1.0
- Tokenizers 0.12.1
|
subhasisj/zh-finetuned-squad-qa-minilmv2-32
|
5410b4aa997a95d1c193323f4d4d645f351943d1
|
2022-05-10T21:58:09.000Z
|
[
"pytorch",
"tensorboard",
"bert",
"question-answering",
"transformers",
"generated_from_trainer",
"model-index",
"autotrain_compatible"
] |
question-answering
| false |
subhasisj
| null |
subhasisj/zh-finetuned-squad-qa-minilmv2-32
| 0 | null |
transformers
| 37,468 |
---
tags:
- generated_from_trainer
model-index:
- name: zh-finetuned-squad-qa-minilmv2-32
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# zh-finetuned-squad-qa-minilmv2-32
This model is a fine-tuned version of [subhasisj/zh-TAPT-MLM-MiniLM](https://huggingface.co/subhasisj/zh-TAPT-MLM-MiniLM) on an unspecified dataset.
It achieves the following results on the evaluation set:
- Loss: 1.4311
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- gradient_accumulation_steps: 8
- total_train_batch_size: 256
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 5
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| No log | 1.0 | 338 | 2.3706 |
| 3.1254 | 2.0 | 676 | 1.7422 |
| 1.6449 | 3.0 | 1014 | 1.5323 |
| 1.6449 | 4.0 | 1352 | 1.4375 |
| 1.3122 | 5.0 | 1690 | 1.4311 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.11.0+cu113
- Datasets 2.2.0
- Tokenizers 0.12.1
|
huxxx657/roberta-base-finetuned-scrambled-squad-5-new
|
b40a9f360dddc2147ffd4fcf068ef649c4a464d3
|
2022-05-10T22:48:00.000Z
|
[
"pytorch",
"tensorboard",
"roberta",
"question-answering",
"dataset:squad",
"transformers",
"generated_from_trainer",
"license:mit",
"model-index",
"autotrain_compatible"
] |
question-answering
| false |
huxxx657
| null |
huxxx657/roberta-base-finetuned-scrambled-squad-5-new
| 0 | null |
transformers
| 37,469 |
---
license: mit
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: roberta-base-finetuned-scrambled-squad-5-new
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# roberta-base-finetuned-scrambled-squad-5-new
This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the squad dataset.
It achieves the following results on the evaluation set:
- Loss: 0.9098
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 7e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 0.941 | 1.0 | 5536 | 0.9098 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.11.0+cu113
- Datasets 2.2.0
- Tokenizers 0.12.1
|
theojolliffe/distilbart-cnn-arxiv-pubmed-pubmed-v3-e8
|
20c3e323d1e1c4136e9f007d595b911396f24f88
|
2022-05-11T12:17:20.000Z
|
[
"pytorch",
"tensorboard",
"bart",
"text2text-generation",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] |
text2text-generation
| false |
theojolliffe
| null |
theojolliffe/distilbart-cnn-arxiv-pubmed-pubmed-v3-e8
| 0 | null |
transformers
| 37,470 |
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- rouge
model-index:
- name: distilbart-cnn-arxiv-pubmed-pubmed-v3-e8
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbart-cnn-arxiv-pubmed-pubmed-v3-e8
This model is a fine-tuned version of [theojolliffe/distilbart-cnn-arxiv-pubmed-pubmed](https://huggingface.co/theojolliffe/distilbart-cnn-arxiv-pubmed-pubmed) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.8422
- Rouge1: 54.9328
- Rouge2: 36.7154
- Rougel: 39.5674
- Rougelsum: 52.4889
- Gen Len: 142.0
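ROUGE figures like the ones above are commonly computed with the `evaluate` library; a hedged sketch (the predictions and references are placeholders, and this may not be exactly how the card's metrics were produced):

```python
import evaluate

# Hedged sketch of a ROUGE computation with placeholder strings.
rouge = evaluate.load("rouge")
scores = rouge.compute(
    predictions=["the model generated this summary"],
    references=["the reference summary for comparison"],
)
print(scores)  # keys include rouge1, rouge2, rougeL, rougeLsum
```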
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 8
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:--------:|
| No log | 1.0 | 398 | 1.1158 | 50.9754 | 30.9416 | 33.9908 | 48.4925 | 142.0 |
| 1.3585 | 2.0 | 796 | 0.9733 | 52.7954 | 33.8196 | 36.7836 | 50.4929 | 141.9259 |
| 0.8785 | 3.0 | 1194 | 0.9142 | 53.5548 | 35.3954 | 37.4787 | 51.1024 | 142.0 |
| 0.6485 | 4.0 | 1592 | 0.8666 | 52.6449 | 34.0018 | 37.5391 | 50.428 | 141.4074 |
| 0.6485 | 5.0 | 1990 | 0.8458 | 53.8913 | 35.4481 | 38.1552 | 51.3737 | 141.8889 |
| 0.4993 | 6.0 | 2388 | 0.8571 | 54.7333 | 36.8173 | 40.228 | 52.5574 | 141.9444 |
| 0.3957 | 7.0 | 2786 | 0.8455 | 54.9826 | 37.9674 | 40.5786 | 52.5968 | 141.9815 |
| 0.328 | 8.0 | 3184 | 0.8422 | 54.9328 | 36.7154 | 39.5674 | 52.4889 | 142.0 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.11.0+cu113
- Datasets 2.2.0
- Tokenizers 0.12.1
|
huxxx657/roberta-base-finetuned-scrambled-squad-10-new
|
fd7f0c5d95889a1dbdfa2480414ec2d86896c7b4
|
2022-05-11T00:56:16.000Z
|
[
"pytorch",
"tensorboard",
"roberta",
"question-answering",
"dataset:squad",
"transformers",
"generated_from_trainer",
"license:mit",
"model-index",
"autotrain_compatible"
] |
question-answering
| false |
huxxx657
| null |
huxxx657/roberta-base-finetuned-scrambled-squad-10-new
| 0 | null |
transformers
| 37,471 |
---
license: mit
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: roberta-base-finetuned-scrambled-squad-10-new
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# roberta-base-finetuned-scrambled-squad-10-new
This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the squad dataset.
It achieves the following results on the evaluation set:
- Loss: 0.9721
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 7e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 0.9984 | 1.0 | 5536 | 0.9721 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.11.0+cu113
- Datasets 2.2.0
- Tokenizers 0.12.1
|
mcurmei/flat_N_max
|
db7f186a638054b3ed0b2108c701e373b32b5aed
|
2022-05-11T03:33:16.000Z
|
[
"pytorch",
"tensorboard",
"distilbert",
"question-answering",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] |
question-answering
| false |
mcurmei
| null |
mcurmei/flat_N_max
| 0 | null |
transformers
| 37,472 |
---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: flat_N_max
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# flat_N_max
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unspecified dataset.
It achieves the following results on the evaluation set:
- Loss: 1.8536
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 1.2462 | 1.0 | 2213 | 1.7958 |
| 0.9293 | 2.0 | 4426 | 1.8093 |
| 0.7249 | 3.0 | 6639 | 1.8536 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.11.0+cu113
- Datasets 2.2.0
- Tokenizers 0.12.1
|
mcurmei/single_label_N_max
|
8e835a9b0c57580bb351420aa9d1955595e5523e
|
2022-05-11T04:49:29.000Z
|
[
"pytorch",
"tensorboard",
"distilbert",
"question-answering",
"dataset:squad",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] |
question-answering
| false |
mcurmei
| null |
mcurmei/single_label_N_max
| 0 | null |
transformers
| 37,473 |
---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: single_label_N_max
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# single_label_N_max
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the squad dataset.
It achieves the following results on the evaluation set:
- Loss: 1.9326
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 2.9746 | 1.0 | 674 | 2.0265 |
| 1.6756 | 2.0 | 1348 | 1.9134 |
| 1.1333 | 3.0 | 2022 | 1.9326 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.11.0+cu113
- Datasets 2.2.0
- Tokenizers 0.12.1
|
mcurmei/unique_N_max
|
8f16ca5a07d01a9e70bcf2725a7e3cc5d67d5766
|
2022-05-11T06:19:57.000Z
|
[
"pytorch",
"tensorboard",
"distilbert",
"question-answering",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] |
question-answering
| false |
mcurmei
| null |
mcurmei/unique_N_max
| 0 | null |
transformers
| 37,474 |
---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: unique_N_max
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# unique_N_max
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unspecified dataset.
It achieves the following results on the evaluation set:
- Loss: 1.7409
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 2.0901 | 1.0 | 1162 | 1.8326 |
| 1.5479 | 2.0 | 2324 | 1.7201 |
| 1.2903 | 3.0 | 3486 | 1.7409 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.11.0+cu113
- Datasets 2.2.0
- Tokenizers 0.12.1
|
kathywu/DialoGPT-small-kathy
|
12aadc9ada0900ae05069852d9550366d77cd5be
|
2022-05-12T05:14:40.000Z
|
[
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] |
conversational
| false |
kathywu
| null |
kathywu/DialoGPT-small-kathy
| 0 | null |
transformers
| 37,475 |
---
tags:
- conversational
---
|
huggingtweets/elonmusk-kimkardashian
|
6f24ba7c0f73e3573992ff66a39d97bfeff817b3
|
2022-05-11T07:03:54.000Z
|
[
"pytorch",
"gpt2",
"text-generation",
"en",
"transformers",
"huggingtweets"
] |
text-generation
| false |
huggingtweets
| null |
huggingtweets/elonmusk-kimkardashian
| 0 | null |
transformers
| 37,476 |
---
language: en
thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true
tags:
- huggingtweets
widget:
- text: "My dream is"
---
<div class="inline-flex flex-col" style="line-height: 1.5;">
<div class="flex">
<div
style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('https://pbs.twimg.com/profile_images/1521957986335297536/itVSA7l0_400x400.jpg')">
</div>
<div
style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('https://pbs.twimg.com/profile_images/1446623190252343301/qIJAwo9I_400x400.jpg')">
</div>
<div
style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('')">
</div>
</div>
<div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI CYBORG 🤖</div>
<div style="text-align: center; font-size: 16px; font-weight: 800">Elon Musk & Kim Kardashian</div>
<div style="text-align: center; font-size: 14px;">@elonmusk-kimkardashian</div>
</div>
I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets).
Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)!
## How does it work?
The model uses the following pipeline.

To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI).
## Training data
The model was trained on tweets from Elon Musk & Kim Kardashian.
| Data | Elon Musk | Kim Kardashian |
| --- | --- | --- |
| Tweets downloaded | 222 | 3241 |
| Retweets | 16 | 715 |
| Short tweets | 47 | 667 |
| Tweets kept | 159 | 1859 |
[Explore the data](https://wandb.ai/wandb/huggingtweets/runs/17bd0o7t/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.
## Training procedure
The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @elonmusk-kimkardashian's tweets.
Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/2g9hft2n) for full transparency and reproducibility.
At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/2g9hft2n/artifacts) is logged and versioned.
## How to use
You can use this model directly with a pipeline for text generation:
```python
from transformers import pipeline
generator = pipeline('text-generation', model='huggingtweets/elonmusk-kimkardashian')
generator("My dream is", num_return_sequences=5)
```
## Limitations and bias
The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias).
In addition, the data present in the user's tweets further affects the text generated by the model.
## About
*Built by Boris Dayma*
[](https://twitter.com/intent/follow?screen_name=borisdayma)
For more details, visit the project repository.
[](https://github.com/borisdayma/huggingtweets)
|
theojolliffe/bart-cnn-pubmed-arxiv-pubmed-arxiv-arxiv
|
806c5e145bbbf3ab70e9570ca85fe9b1d8fd5c44
|
2022-05-11T13:55:48.000Z
|
[
"pytorch",
"tensorboard",
"bart",
"text2text-generation",
"transformers",
"generated_from_trainer",
"license:mit",
"model-index",
"autotrain_compatible"
] |
text2text-generation
| false |
theojolliffe
| null |
theojolliffe/bart-cnn-pubmed-arxiv-pubmed-arxiv-arxiv
| 0 | null |
transformers
| 37,477 |
---
license: mit
tags:
- generated_from_trainer
metrics:
- rouge
model-index:
- name: bart-cnn-pubmed-arxiv-pubmed-arxiv-arxiv
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# bart-cnn-pubmed-arxiv-pubmed-arxiv-arxiv
This model is a fine-tuned version of [theojolliffe/bart-cnn-pubmed-arxiv-pubmed-arxiv](https://huggingface.co/theojolliffe/bart-cnn-pubmed-arxiv-pubmed-arxiv) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.8065
- Rouge1: 54.5916
- Rouge2: 36.7817
- Rougel: 40.4708
- Rougelsum: 52.5754
- Gen Len: 142.0
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 1
- eval_batch_size: 1
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:--------:|
| 1.2945 | 1.0 | 795 | 0.9555 | 51.91 | 32.0926 | 33.6727 | 49.5306 | 142.0 |
| 0.7153 | 2.0 | 1590 | 0.8317 | 52.4708 | 34.1035 | 35.2968 | 50.2966 | 141.963 |
| 0.5398 | 3.0 | 2385 | 0.8133 | 52.4603 | 33.497 | 36.4227 | 50.2513 | 141.8704 |
| 0.3568 | 4.0 | 3180 | 0.8091 | 52.3993 | 34.2424 | 37.7819 | 50.2069 | 142.0 |
| 0.2842 | 5.0 | 3975 | 0.8065 | 54.5916 | 36.7817 | 40.4708 | 52.5754 | 142.0 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.11.0+cu113
- Datasets 2.2.0
- Tokenizers 0.12.1
|
masakhane/afrimt5_en_twi_news
|
45eccd6d401769818c7f7983ca794935d4a7e2f4
|
2022-05-12T11:55:30.000Z
|
[
"pytorch",
"mt5",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/afrimt5_en_twi_news
| 0 | null |
transformers
| 37,478 |
---
license: afl-3.0
---
|
masakhane/afrimt5_twi_en_news
|
872285c9175e4e6d470a96348cc70e850955ed61
|
2022-05-12T11:55:47.000Z
|
[
"pytorch",
"mt5",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/afrimt5_twi_en_news
| 0 | null |
transformers
| 37,479 |
---
license: afl-3.0
---
|
masakhane/afrimt5_zul_en_news
|
39ea55cc4095b85a226a81c763018324ed7cae72
|
2022-05-12T12:51:45.000Z
|
[
"pytorch",
"mt5",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/afrimt5_zul_en_news
| 0 | null |
transformers
| 37,480 |
---
license: afl-3.0
---
|
masakhane/afrimbart_en_zul_news
|
0b4389b70b17eb0e10b633c0106074a3a2c45d3b
|
2022-05-12T12:51:49.000Z
|
[
"pytorch",
"mbart",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/afrimbart_en_zul_news
| 0 | null |
transformers
| 37,481 |
---
license: afl-3.0
---
|
masakhane/afribyt5_twi_en_news
|
5bc4ed934d97c928439f0c73b1e8ac2620ef4a65
|
2022-05-12T12:07:35.000Z
|
[
"pytorch",
"t5",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/afribyt5_twi_en_news
| 0 | null |
transformers
| 37,482 |
---
license: afl-3.0
---
|
masakhane/afribyt5_en_twi_news
|
c2074a74a1ef1916b3d0dce432fcd456bc864569
|
2022-05-12T12:07:32.000Z
|
[
"pytorch",
"t5",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/afribyt5_en_twi_news
| 0 | null |
transformers
| 37,483 |
---
license: afl-3.0
---
|
masakhane/afribyt5_zul_en_news
|
af94d5014988f24f5ed30b0b72fea3d85e5e8e05
|
2022-05-12T12:59:08.000Z
|
[
"pytorch",
"t5",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/afribyt5_zul_en_news
| 0 | null |
transformers
| 37,484 |
---
license: afl-3.0
---
|
masakhane/byt5_en_twi_news
|
5f1fc04f243bf27bc9944b8d21510883a066e07e
|
2022-05-12T12:07:37.000Z
|
[
"pytorch",
"t5",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/byt5_en_twi_news
| 0 | null |
transformers
| 37,485 |
---
license: afl-3.0
---
|
masakhane/byt5_en_zul_news
|
1740c75e08da71a06e8aae6c53a3b46fc4a950e6
|
2022-05-12T12:59:10.000Z
|
[
"pytorch",
"t5",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/byt5_en_zul_news
| 0 | null |
transformers
| 37,486 |
---
license: afl-3.0
---
|
masakhane/mt5_twi_en_news
|
21041431db90a40fa4efd3c7ac61cdf7303d0e5a
|
2022-05-12T12:16:04.000Z
|
[
"pytorch",
"mt5",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/mt5_twi_en_news
| 0 | null |
transformers
| 37,487 |
---
license: afl-3.0
---
|
masakhane/mt5_en_twi_news
|
9404feb8904f00262b5a73e6b9036e2f2cb91b21
|
2022-05-12T12:16:07.000Z
|
[
"pytorch",
"mt5",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/mt5_en_twi_news
| 0 | null |
transformers
| 37,488 |
---
license: afl-3.0
---
|
masakhane/mt5_zul_en_news
|
e8980f16f1be056679c1cea885042b1554bc2016
|
2022-05-12T13:06:22.000Z
|
[
"pytorch",
"mt5",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/mt5_zul_en_news
| 0 | null |
transformers
| 37,489 |
---
license: afl-3.0
---
|
masakhane/mt5_en_zul_news
|
c0a588b3d1a386eed70946e792fe509fc81d091d
|
2022-05-12T13:06:25.000Z
|
[
"pytorch",
"mt5",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/mt5_en_zul_news
| 0 | null |
transformers
| 37,490 |
---
license: afl-3.0
---
|
masakhane/m2m100_418M_twi_en_news
|
7eeb65dfce7dc881bd03f9a63c89cb97a277cd84
|
2022-05-12T12:27:55.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_twi_en_news
| 0 | null |
transformers
| 37,491 |
---
license: afl-3.0
---
|
masakhane/m2m100_418M_zul_en_news
|
2e29100ba570d9bcbdd553c4d81300b7f9e54a5f
|
2022-05-12T13:14:49.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_zul_en_news
| 0 | null |
transformers
| 37,492 |
---
license: afl-3.0
---
|
masakhane/m2m100_418M_en_zul_rel_news
|
4c0674d688fd8ff20a42afe13820efbf241b7fed
|
2022-05-12T13:14:46.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_en_zul_rel_news
| 0 | null |
transformers
| 37,493 |
---
license: afl-3.0
---
|
masakhane/m2m100_418M_zul_en_rel_news
|
ae7c02b72bfa6c8dd27b19d6bc9d31d0784ffd98
|
2022-05-12T13:29:50.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_zul_en_rel_news
| 0 | null |
transformers
| 37,494 |
---
license: afl-3.0
---
|
masakhane/m2m100_418M_twi_en_rel_news
|
9cc90b51d8f3c91925373ac1eee2812efbf2eca1
|
2022-05-12T12:28:03.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_twi_en_rel_news
| 0 | null |
transformers
| 37,495 |
---
license: afl-3.0
---
|
masakhane/m2m100_418M_en_twi_rel_news
|
a479412305190f3a18c918fb4043af8260f53fb8
|
2022-05-12T12:27:59.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_en_twi_rel_news
| 0 | null |
transformers
| 37,496 |
---
license: afl-3.0
---
|
masakhane/m2m100_418M_en_twi_rel_news_ft
|
437133cfff246407570b304251e9628f7993ab57
|
2022-05-12T12:35:31.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_en_twi_rel_news_ft
| 0 | null |
transformers
| 37,497 |
---
license: afl-3.0
---
|
masakhane/m2m100_418M_twi_en_rel_news_ft
|
50be240022d7a74fa73980408c525d96c44deab6
|
2022-05-12T12:35:34.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_twi_en_rel_news_ft
| 0 | null |
transformers
| 37,498 |
---
license: afl-3.0
---
|
masakhane/m2m100_418M_zul_en_rel_news_ft
|
1f2aa786a231443a1017f5a200c307f590a44aca
|
2022-05-12T13:36:14.000Z
|
[
"pytorch",
"m2m_100",
"text2text-generation",
"transformers",
"license:afl-3.0",
"autotrain_compatible"
] |
text2text-generation
| false |
masakhane
| null |
masakhane/m2m100_418M_zul_en_rel_news_ft
| 0 | null |
transformers
| 37,499 |
---
license: afl-3.0
---
|