pipeline_tag (stringclasses, 48 values) | library_name (stringclasses, 205 values) | text (stringlengths, 0-18.3M) | metadata (stringlengths, 2-1.07B) | id (stringlengths, 5-122) | last_modified (null) | tags (listlengths, 1-1.84k) | sha (null) | created_at (stringlengths, 25-25) |
---|---|---|---|---|---|---|---|---|
text-classification | transformers | {} | MickyMike/7-GPT2SP-talendesb | null | [
"transformers",
"pytorch",
"gpt2",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text-classification | transformers | {} | MickyMike/7-GPT2SP-titanium | null | [
"transformers",
"pytorch",
"gpt2",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text-classification | transformers | {} | MickyMike/7-GPT2SP-usergrid | null | [
"transformers",
"pytorch",
"gpt2",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text-classification | transformers | {} | MickyMike/77-GPT2SP-appceleratorstudio-aptanastudio | null | [
"transformers",
"pytorch",
"gpt2",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text-classification | transformers | {} | MickyMike/77-GPT2SP-appceleratorstudio-titanium | null | [
"transformers",
"pytorch",
"gpt2",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text-classification | transformers | {} | MickyMike/77-GPT2SP-aptanastudio-titanium | null | [
"transformers",
"pytorch",
"gpt2",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text-classification | transformers | {} | MickyMike/77-GPT2SP-mesos-usergrid | null | [
"transformers",
"pytorch",
"gpt2",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text-classification | transformers | {} | MickyMike/77-GPT2SP-mule-mulestudio | null | [
"transformers",
"pytorch",
"gpt2",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text-classification | transformers | {} | MickyMike/77-GPT2SP-mulestudio-mule | null | [
"transformers",
"pytorch",
"gpt2",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text-classification | transformers | {} | MickyMike/77-GPT2SP-titanium-appceleratorstudio | null | [
"transformers",
"pytorch",
"gpt2",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text-classification | transformers | {} | MickyMike/77-GPT2SP-usergrid-mesos | null | [
"transformers",
"pytorch",
"gpt2",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text-classification | transformers | {} | MickyMike/777-GPT2SP-appceleratorstudio-mule | null | [
"transformers",
"pytorch",
"gpt2",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text-classification | transformers | {} | MickyMike/777-GPT2SP-appceleratorstudio-mulestudio | null | [
"transformers",
"pytorch",
"gpt2",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text-classification | transformers | {} | MickyMike/777-GPT2SP-clover-usergrid | null | [
"transformers",
"pytorch",
"gpt2",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text-classification | transformers | {} | MickyMike/777-GPT2SP-mule-titanium | null | [
"transformers",
"pytorch",
"gpt2",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text-classification | transformers | {} | MickyMike/777-GPT2SP-mulestudio-titanium | null | [
"transformers",
"pytorch",
"gpt2",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text-classification | transformers | {} | MickyMike/777-GPT2SP-talenddataquality-appceleratorstudio | null | [
"transformers",
"pytorch",
"gpt2",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text-classification | transformers | {} | MickyMike/777-GPT2SP-talenddataquality-aptanastudio | null | [
"transformers",
"pytorch",
"gpt2",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text-classification | transformers | {} | MickyMike/777-GPT2SP-talendesb-mesos | null | [
"transformers",
"pytorch",
"gpt2",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text2text-generation | transformers | {"license": "mit"} | MickyMike/CT5 | null | [
"transformers",
"jax",
"t5",
"text2text-generation",
"license:mit",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
fill-mask | transformers | {} | MickyMike/codebert-c | null | [
"transformers",
"pytorch",
"roberta",
"fill-mask",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
fill-mask | transformers | {} | MickyMike/graphcodebert-c | null | [
"transformers",
"pytorch",
"roberta",
"fill-mask",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text-generation | transformers | {} | MicroTurtle/DialoGPT-medium-shawn | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | MicroTurtle/DialoGPT-small-shawn | null | [
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text-generation | transformers | {"tags": ["conversational"]} | Midhunkrishna/DialoGPT-small-bjk | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"conversational",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text-generation | transformers |
#harry | {"tags": ["conversational"]} | Mierln/SmartHarry | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"conversational",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
text-generation | transformers |
# Edward Elric DialoGPT Model | {"tags": ["conversational"]} | MightyCoderX/DialoGPT-medium-EdwardElric | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"conversational",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
null | null | {} | Miguelm/Yyyy | null | [
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
fill-mask | transformers | kcbert-mlm-finetune | {} | stresscaptor/kcbert-mlm-finetune | null | [
"transformers",
"pytorch",
"bert",
"fill-mask",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
text-classification | transformers | {} | Mihneo/romanian_bert_news | null | [
"transformers",
"pytorch",
"bert",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | Mike1123/Anna | null | [
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | Mikiyarb6/Ggjii | null | [
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text-generation | transformers | {} | MilaBromm/TNGMain | null | [
"transformers",
"pytorch",
"jax",
"gpt2",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text-classification | transformers |
# FEEL-IT: Emotion and Sentiment Classification for the Italian Language
## FEEL-IT Python Package
You can find the package that uses this model for emotion and sentiment classification **[here](https://github.com/MilaNLProc/feel-it)**; it is meant to be a very simple interface over HuggingFace models.
## License
Users should refer to the [following license](https://developer.twitter.com/en/developer-terms/commercial-terms).
## Abstract
Sentiment analysis is a common task to understand people's reactions online. Still, we often need more nuanced information: is the post negative because the user is angry or because they are sad?
An abundance of approaches has been introduced for tackling both tasks. However, at least for Italian, they all treat only one of the tasks at a time. We introduce *FEEL-IT*, a novel benchmark corpus of Italian Twitter posts annotated with four basic emotions: **anger, fear, joy, sadness**. By collapsing them, we can also do **sentiment analysis**. We evaluate our corpus on benchmark datasets for both emotion and sentiment classification, obtaining competitive results.
We release an [open-source Python library](https://github.com/MilaNLProc/feel-it), so researchers can use a model trained on FEEL-IT for inferring both sentiments and emotions from Italian text.
| Model | Download |
| ------ | -------------------------|
| `feel-it-italian-sentiment` | [Link](https://huggingface.co/MilaNLProc/feel-it-italian-sentiment) |
| `feel-it-italian-emotion` | [Link](https://huggingface.co/MilaNLProc/feel-it-italian-emotion) |
## Model
The *feel-it-italian-emotion* model performs **emotion classification (joy, fear, anger, sadness)** on Italian. We fine-tuned the [UmBERTo model](https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1) on our new dataset (i.e., FEEL-IT) obtaining state-of-the-art performances on different benchmark corpora.
## Data
Our data has been collected by annotating tweets from a broad range of topics. In total, we have 2037 tweets annotated with an emotion label. More details can be found in our paper (https://aclanthology.org/2021.wassa-1.8/).
## Performance
We evaluate our performance using [MultiEmotions-It](http://ceur-ws.org/Vol-2769/paper_08.pdf). This dataset differs from FEEL-IT both in terms of topic variety and considered social media (i.e., YouTube and Facebook). We considered only the subset of emotions present in FEEL-IT. To give a point of reference, we also show the Most Frequent Class (MFC) baseline results. The results show that training on FEEL-IT brings stable performance even on datasets from different contexts.
| Training Dataset | Macro-F1 | Accuracy
| ------ | ------ |------ |
| MFC | 0.20 | 0.64 |
| FEEL-IT | **0.57** | **0.73** |
## Usage
```python
from transformers import pipeline
classifier = pipeline("text-classification",model='MilaNLProc/feel-it-italian-emotion',top_k=2)
prediction = classifier("Oggi sono proprio contento!")
print(prediction)
```
## Citation
Please use the following bibtex entry if you use this model in your project:
```
@inproceedings{bianchi2021feel,
title = {{"FEEL-IT: Emotion and Sentiment Classification for the Italian Language"}},
author = "Bianchi, Federico and Nozza, Debora and Hovy, Dirk",
booktitle = "Proceedings of the 11th Workshop on Computational Approaches to Subjectivity, Sentiment and Social Media Analysis",
year = "2021",
publisher = "Association for Computational Linguistics",
}
``` | {"language": "it", "tags": ["sentiment", "emotion", "Italian"]} | MilaNLProc/feel-it-italian-emotion | null | [
"transformers",
"pytorch",
"tf",
"camembert",
"text-classification",
"sentiment",
"emotion",
"Italian",
"it",
"autotrain_compatible",
"endpoints_compatible",
"has_space",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
text-classification | transformers |
# FEEL-IT: Emotion and Sentiment Classification for the Italian Language
## FEEL-IT Python Package
You can find the package that uses this model for emotion and sentiment classification **[here](https://github.com/MilaNLProc/feel-it)**; it is meant to be a very simple interface over HuggingFace models.
## License
Users should refer to the [following license](https://developer.twitter.com/en/developer-terms/commercial-terms).
## Abstract
Sentiment analysis is a common task to understand people's reactions online. Still, we often need more nuanced information: is the post negative because the user is angry or because they are sad?
An abundance of approaches has been introduced for tackling both tasks. However, at least for Italian, they all treat only one of the tasks at a time. We introduce *FEEL-IT*, a novel benchmark corpus of Italian Twitter posts annotated with four basic emotions: **anger, fear, joy, sadness**. By collapsing them, we can also do **sentiment analysis**. We evaluate our corpus on benchmark datasets for both emotion and sentiment classification, obtaining competitive results.
We release an [open-source Python library](https://github.com/MilaNLProc/feel-it), so researchers can use a model trained on FEEL-IT for inferring both sentiments and emotions from Italian text.
| Model | Download |
| ------ | -------------------------|
| `feel-it-italian-sentiment` | [Link](https://huggingface.co/MilaNLProc/feel-it-italian-sentiment) |
| `feel-it-italian-emotion` | [Link](https://huggingface.co/MilaNLProc/feel-it-italian-emotion) |
## Model
The *feel-it-italian-sentiment* model performs **sentiment analysis** on Italian. We fine-tuned the [UmBERTo model](https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1) on our new dataset (i.e., FEEL-IT) obtaining state-of-the-art performances on different benchmark corpora.
## Data
Our data has been collected by annotating tweets from a broad range of topics. In total, we have 2037 tweets annotated with an emotion label. More details can be found in our paper (https://aclanthology.org/2021.wassa-1.8/).
## Performance
We evaluate our performance using [SENTIPOLC16 Evalita](http://www.di.unito.it/~tutreeb/sentipolc-evalita16/). We collapsed the FEEL-IT classes into 2 by mapping joy to the *positive* class and anger, fear and sadness into the *negative* class. We compare three different experimental configurations training on FEEL-IT, SENTIPOLC16, or both by testing on the SENTIPOLC16 test set.
The results show that training on FEEL-IT can provide better results on the SENTIPOLC16 test set than those that can be obtained with the SENTIPOLC16 training set.
| Training Dataset | Macro-F1 | Accuracy
| ------ | ------ |------ |
| SENTIPOLC16 | 0.80 | 0.81 |
| FEEL-IT | **0.81** | **0.84** |
| FEEL-IT+SentiPolc | 0.81 | 0.82
## Usage
```python
from transformers import pipeline
classifier = pipeline("text-classification",model='MilaNLProc/feel-it-italian-sentiment',top_k=2)
prediction = classifier("Oggi sono proprio contento!")
print(prediction)
```
## Citation
Please use the following bibtex entry if you use this model in your project:
```
@inproceedings{bianchi2021feel,
title = {{"FEEL-IT: Emotion and Sentiment Classification for the Italian Language"}},
author = "Bianchi, Federico and Nozza, Debora and Hovy, Dirk",
booktitle = "Proceedings of the 11th Workshop on Computational Approaches to Subjectivity, Sentiment and Social Media Analysis",
year = "2021",
publisher = "Association for Computational Linguistics",
}
``` | {"language": "it", "tags": ["sentiment", "Italian"]} | MilaNLProc/feel-it-italian-sentiment | null | [
"transformers",
"pytorch",
"tf",
"camembert",
"text-classification",
"sentiment",
"Italian",
"it",
"autotrain_compatible",
"endpoints_compatible",
"has_space",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
text-classification | transformers | {} | Milian/bert_finetuning_test | null | [
"transformers",
"pytorch",
"jax",
"bert",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text2text-generation | transformers | {} | MilkyLatte/q-g-model | null | [
"transformers",
"pytorch",
"jax",
"t5",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text-generation | transformers |
# Slovak GPT-J-1.4B
Slovak GPT-J-1.4B with the whopping `1,415,283,792` parameters is the latest and the largest model released in Slovak GPT-J series. Smaller variants, [Slovak GPT-J-405M](https://huggingface.co/Milos/slovak-gpt-j-405M) and [Slovak GPT-J-162M](https://huggingface.co/Milos/slovak-gpt-j-162M), are still available.
## Model Description
Model is based on [GPT-J](https://github.com/kingoflolz/mesh-transformer-jax/) and has over 1.4B trainable parameters.
<figure>
| Hyperparameter | Value |
|----------------------|----------------------------------------------------------------------------------------------------------------------------------------|
| \\(n_{parameters}\\) | 1,415,283,792 |
| \\(n_{layers}\\) | 24 |
| \\(d_{model}\\) | 2048 |
| \\(d_{ff}\\) | 16384 |
| \\(n_{heads}\\) | 16 |
| \\(d_{head}\\) | 256 |
| \\(n_{ctx}\\) | 2048 |
| \\(n_{vocab}\\) | 50256 (same tokenizer as GPT-2/3†) |
| Positional Encoding | [Rotary Position Embedding (RoPE)](https://arxiv.org/abs/2104.09864) |
| RoPE Dimensions | [64](https://github.com/kingoflolz/mesh-transformer-jax/blob/f2aa66e0925de6593dcbb70e72399b97b4130482/mesh_transformer/layers.py#L223) |
<p><strong>†</strong> ByteLevelBPETokenizer was trained on the same Slovak corpus.</p></figure>
## Training data
Slovak GPT-J models were trained on a privately collected dataset consisting of predominantly Slovak text spanning different categories, e.g. web, news articles or even biblical texts - in total, over 40GB of text data was used to train this model.
The dataset was preprocessed and cleaned in a specific way that involves a few minor caveats, so in order to achieve the expected performance, feel free to refer to the [How to use] section. Please, keep in mind that despite the effort to remove inappropriate parts of the corpus, the model still might generate sensitive content or leak sensitive information.
## Training procedure
This model was trained for a bit more than 26.5 billion tokens over 48,001 steps on TPU v3-8 pod. The cross-entropy validation loss at the last step was `2.657`.
## Intended Use
Same as the original GPT-J, Slovak GPT-J learns an inner representation of the language that can be used to extract features useful for downstream tasks, however, the intended use is text generation from a prompt.
### How to use
This model along with the tokenizer can be easily loaded using the `AutoModelForCausalLM` functionality:
```python
from transformers import AutoTokenizer, AutoModelForCausalLM
tokenizer = AutoTokenizer.from_pretrained("Milos/slovak-gpt-j-1.4B")
model = AutoModelForCausalLM.from_pretrained("Milos/slovak-gpt-j-1.4B")
```
When generating a prompt keep in mind these three things, and you should be good to go:
1. Never leave trailing whitespaces. There's a difference between how the tokenizer encodes "Mám rád slovenčinu" (no space after `slovenčinu`) and "Mám rád slovenčinu " (trailing space after `slovenčinu`), i.e. `[12805, 2872, 46878]` != `[12805, 2872, 46878, 221]`.
2. Always use good ol' US English primary double quotation marks, i.e. `""` instead of `„“`.
3. In case of a new line, always enter `\n\n` instead of a single `\n`.
To illustrate an example of a basic text generation:
```
>>> prompt = "Tradičné jedlo na Orave sú"
>>> encoded_input = tokenizer(prompt, return_tensors='pt')
>>> output = model.generate(**encoded_input)
>>> tokenizer.decode(output[0])
'Tradičné jedlo na Orave sú bryndzové halušky\n\nNa Orave sa v minulosti varilo viac druhov'
```
### Capabilities, Limitations, and Biases
Based on the parameter count and our internal testing, this should be by far the most capable model out of all models released in the Slovak GPT-J series.
For the sake of simplicity, I have omitted all the boilerplate code and swapped `\n` for new lines in the examples that follow. These were generated with both `top_p` and `temperature` set to `0.9`.
A spin on "Mor ho!" by Samo Chalupka
```
PROMPT
Zleteli orly z Tatry, tiahnu na podolia, ponad vysoké hory, ponad rovné polia; preleteli cez Dunaj, cez tú šíru vodu, sadli tam za pomedzím slovenského rodu.
Duní Dunaj a luna za lunou sa valí
OUTPUT
Zleteli orly z Tatry, tiahnu na podolia, ponad vysoké hory, ponad rovné polia; preleteli cez Dunaj, cez tú šíru vodu, sadli tam za pomedzím slovenského rodu.
Duní Dunaj a luna za lunou sa valí: nad ním svieti pevný hrad na vysokom bralí. Pod tým hradom je zem slovenská, na nej je Slovensko naše.
Na tom vrchu leží slovenský národ, pod tým bralom leží krajina slovenská. A pod tým hradom leží i jeho sláva: na tom vrchu, na tom brale leží sláva slovenského národa.
```
Jokes are a bit hit or miss. Very often they contain vulgarisms and/or are highly politically incorrect. This one is the exception rather than the rule.
```
PROMPT
Nasledujú najlepšie vtipy.
Prečo
OUTPUT
Nasledujú najlepšie vtipy.
Prečo má policajt pri sebe vždy dva nože? Jeden je na sebaobranu, druhý na boj zblízka.
```
The infamous unicorn prompt from GPT-2 paper.
```
PROMPT
V šokujúcom náleze vedci objavili stádo jednorožcov žijúcich v odľahlom, predtým nepreskúmanom údolí, v Andách. Ešte prekvapivejšie pre vedcov bol fakt, že jednorožce hovorili plynule po slovensky.
OUTPUT
V šokujúcom náleze vedci objavili stádo jednorožcov žijúcich v odľahlom, predtým nepreskúmanom údolí, v Andách. Ešte prekvapivejšie pre vedcov bol fakt, že jednorožce hovorili plynule po slovensky.
V Andách ich totiž žilo približne 700 jedincov, ktoré sa živili výhradne materským mliekom. Slováci sú tak pravdepodobne prvými Európanmi, ktorí v Andách stretli jednorožca. "Je to dôkaz, že tieto zvieratá sú inteligentné a že žijú v Andách už stovky rokov," povedal pre webový portál televízie JOJ profesor geológie, geografie a zoológie, Milan Kováč.
Podľa profesora Kováča si v Andách zvieratá vytvárajú svoj vlastný jazyk. Je to zároveň dôkaz, že jednorožce žili v minulosti aj v slovenských pohoriach. "Jednorožce sa tam síce vyskytovali, ale neboli tak dobre preskúmané, ako teraz v Andách."
Na Slovensku však ľudia o jednorožcoch donedávna vedeli veľmi málo.<|endoftext|>
```
Since the dataset contains profanity, politically incorrect language, and (unintentionally) even bits of text in Czech, the model can generate such content to some extent too. Here's an example of the model output when the prompt is in Czech:
```
>>> prompt = "Věta nesmí být sprostá a musí být zcela"
>>> encoded_input = tokenizer(prompt, return_tensors='pt')
>>> output = model.generate(**encoded_input, max_length=16)
>>> tokenizer.decode(output[0])
'Věta nesmí být sprostá a musí být zcela pravdivá.'
```
## Citation and Related Information
This was done as a moonlighting project during summer of 2021 to better understand transformers. I didn't have much free time to open source it properly, so it all sat on my hard drive until now :)
If you use this model or have any questions about it feel free to hit me up at [twitter](https://twitter.com/miloskondela) or check out my [github](https://github.com/kondela) profile.
### BibTeX entry
To cite this model:
```bibtex
@misc{slovak-gpt-j-1.4B,
author = {Kondela, Milos},
title = {{Slovak GPT-J-1.4B}},
howpublished = {\url{https://huggingface.co/Milos/slovak-gpt-j-1.4B}},
year = 2022,
month = February
}
```
To cite the codebase that trained this model:
```bibtex
@misc{mesh-transformer-jax,
author = {Wang, Ben},
title = {{Mesh-Transformer-JAX: Model-Parallel Implementation of Transformer Language Model with JAX}},
howpublished = {\url{https://github.com/kingoflolz/mesh-transformer-jax}},
year = 2021,
month = May
}
```
## Acknowledgements
This project was generously supported by [TPU Research Cloud (TRC) program](https://sites.research.google/trc/about/). Shoutout also goes to [Ben Wang](https://github.com/kingoflolz) and great [EleutherAI community](https://www.eleuther.ai/). | {"language": ["sk"], "license": "gpl-3.0", "tags": ["Slovak GPT-J", "pytorch", "causal-lm"]} | Milos/slovak-gpt-j-1.4B | null | [
"transformers",
"pytorch",
"gptj",
"text-generation",
"Slovak GPT-J",
"causal-lm",
"sk",
"arxiv:2104.09864",
"license:gpl-3.0",
"autotrain_compatible",
"endpoints_compatible",
"has_space",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
text-generation | transformers |
# Slovak GPT-J-162M
Slovak GPT-J-162M is the first model released in Slovak GPT-J series and the very first publicly available transformer trained predominantly on Slovak corpus. Since the initial release two other models were made public, [Slovak GPT-J-405M](https://huggingface.co/Milos/slovak-gpt-j-405M) and the largest [Slovak GPT-J-1.4B](https://huggingface.co/Milos/slovak-gpt-j-1.4B).
## Model Description
Model is based on [GPT-J](https://github.com/kingoflolz/mesh-transformer-jax/) and has over 162M trainable parameters.
<figure>
| Hyperparameter | Value |
|----------------------|-------------------------------------------------------------------------------------------------------------------------------|
| \\(n_{parameters}\\) | 162,454,608 |
| \\(n_{layers}\\) | 12 |
| \\(d_{model}\\) | 768 |
| \\(d_{ff}\\) | 16384 |
| \\(n_{heads}\\) | 16 |
| \\(d_{head}\\) | 256 |
| \\(n_{ctx}\\) | 2048 |
| \\(n_{vocab}\\) | 50256 (same tokenizer as GPT-2/3†) |
| Positional Encoding | [Rotary Position Embedding (RoPE)](https://arxiv.org/abs/2104.09864) |
| RoPE Dimensions | [64](https://github.com/kingoflolz/mesh-transformer-jax/blob/f2aa66e0925de6593dcbb70e72399b97b4130482/mesh_transformer/layers.py#L223) |
<p><strong>†</strong> ByteLevelBPETokenizer was trained on the same Slovak corpus.</p></figure>
## Training data
Slovak GPT-J-162M was trained on a privately collected dataset consisting of predominantly Slovak text spanning different categories, e.g. web, news articles or even biblical texts - in total, over 40GB of text data was used to train this model.
The dataset was preprocessed and cleaned in a specific way that involves a few minor caveats, so in order to achieve the expected performance, feel free to refer to the [How to use] section. Please, keep in mind that despite the effort to remove inappropriate parts of the corpus, the model still might generate sensitive content or leak sensitive information.
## Training procedure
This model was trained for almost 37 billion tokens over 69,001 steps on TPU v3-8 pod. The cross-entropy validation loss at the last step was 3.065.
## Intended Use
Same as the original GPT-J, Slovak GPT-J learns an inner representation of the language that can be used to extract features useful for downstream tasks, however, the intended use is text generation from a prompt.
### How to use
This model along with the tokenizer can be easily loaded using the `AutoModelForCausalLM` functionality:
```python
from transformers import AutoTokenizer, AutoModelForCausalLM
tokenizer = AutoTokenizer.from_pretrained("Milos/slovak-gpt-j-162M")
model = AutoModelForCausalLM.from_pretrained("Milos/slovak-gpt-j-162M")
```
When generating a prompt keep in mind these three things, and you should be good to go:
1. Never leave trailing whitespaces. There's a difference between how the tokenizer encodes "Mám rád slovenčinu" (no space after `slovenčinu`) and "Mám rád slovenčinu " (trailing space after `slovenčinu`), i.e. `[12805, 2872, 46878]` != `[12805, 2872, 46878, 221]`.
2. Always use good ol' US English primary double quotation marks, i.e. `""` instead of `„“`.
3. In case of a new line, always enter `\n\n` instead of a single `\n`.
To illustrate an example of a basic text generation:
```
>>> prompt = "Moje najobľubenejšie mesto na severe Slovenska je"
>>> encoded_input = tokenizer(prompt, return_tensors='pt')
>>> output = model.generate(**encoded_input)
>>> tokenizer.decode(output[0])
'Moje najobľubenejšie mesto na severe Slovenska je Žilina.\n\nV Žiline sa nachádza množstvo zaujímavých miest'
```
### Capabilities, Limitations, and Biases
First and foremost, the capability of this particular model is very limited due to its relatively small size totalling only 162M parameters; hence, the intended use is to educate and have fun! :)
Since the dataset contains profanity, politically incorrect language, and (unintentionally) even bits of text in Czech, the model can generate such content to some extent too. Here's an example of the model output when the prompt is in Czech:
```
>>> prompt = "Věta nesmí být sprostá a musí být zcela"
>>> encoded_input = tokenizer(prompt, return_tensors='pt')
>>> output = model.generate(**encoded_input, max_length=16)
>>> tokenizer.decode(output[0])
'Věta nesmí být sprostá a musí být zcela věrná.'
```
## Citation and Related Information
This was done as a moonlighting project during summer of 2021 to better understand transformers. I didn't have much free time to open source it properly, so it all sat on my hard drive until now. Based on the popularity and interest in this model I might release _substantially_ larger versions of Slovak GPT-J models that are way more capable.
If you use this model or have any questions about it feel free to hit me up at [twitter](https://twitter.com/miloskondela) or check out my [github](https://github.com/kondela) profile.
### BibTeX entry
To cite this model:
```bibtex
@misc{slovak-gpt-j-162m,
author = {Kondela, Milos},
title = {{Slovak GPT-J-162M}},
howpublished = {\url{https://huggingface.co/Milos/slovak-gpt-j-162M}},
year = 2022,
month = February
}
```
To cite the codebase that trained this model:
```bibtex
@misc{mesh-transformer-jax,
author = {Wang, Ben},
title = {{Mesh-Transformer-JAX: Model-Parallel Implementation of Transformer Language Model with JAX}},
howpublished = {\url{https://github.com/kingoflolz/mesh-transformer-jax}},
year = 2021,
month = May
}
```
## Acknowledgements
This project was generously supported by [TPU Research Cloud (TRC) program](https://sites.research.google/trc/about/). Shoutout also goes to [Ben Wang](https://github.com/kingoflolz) and great [EleutherAI community](https://www.eleuther.ai/). | {"language": ["sk"], "license": "gpl-3.0", "tags": ["Slovak GPT-J", "pytorch", "causal-lm"]} | Milos/slovak-gpt-j-162M | null | [
"transformers",
"pytorch",
"gptj",
"text-generation",
"Slovak GPT-J",
"causal-lm",
"sk",
"arxiv:2104.09864",
"license:gpl-3.0",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
text-generation | transformers |
# Slovak GPT-J-405M
Slovak GPT-J-405M is the second model released in Slovak GPT-J series after its smaller variant [Slovak GPT-J-162M](https://huggingface.co/Milos/slovak-gpt-j-162M). Since then a larger [Slovak GPT-J-1.4B](https://huggingface.co/Milos/slovak-gpt-j-1.4B) was released.
## Model Description
Model is based on [GPT-J](https://github.com/kingoflolz/mesh-transformer-jax/) and has over 405M trainable parameters.
<figure>
| Hyperparameter | Value |
|----------------------|----------------------------------------------------------------------------------------------------------------------------------------|
| \\(n_{parameters}\\) | 405,677,136 |
| \\(n_{layers}\\) | 24 |
| \\(d_{model}\\) | 1024 |
| \\(d_{ff}\\) | 16384 |
| \\(n_{heads}\\) | 16 |
| \\(d_{head}\\) | 256 |
| \\(n_{ctx}\\) | 2048 |
| \\(n_{vocab}\\) | 50256 (same tokenizer as GPT-2/3†) |
| Positional Encoding | [Rotary Position Embedding (RoPE)](https://arxiv.org/abs/2104.09864) |
| RoPE Dimensions | [64](https://github.com/kingoflolz/mesh-transformer-jax/blob/f2aa66e0925de6593dcbb70e72399b97b4130482/mesh_transformer/layers.py#L223) |
<p><strong>†</strong> ByteLevelBPETokenizer was trained on the same Slovak corpus.</p></figure>
## Training data
Slovak GPT-J models were trained on a privately collected dataset consisting of predominantly Slovak text spanning different categories, e.g. web, news articles or even biblical texts - in total, over 40GB of text data was used to train this model.
The dataset was preprocessed and cleaned in a specific way that involves a few minor caveats, so in order to achieve the expected performance, feel free to refer to the [How to use] section. Please, keep in mind that despite the effort to remove inappropriate parts of the corpus, the model still might generate sensitive content or leak sensitive information.
## Training procedure
This model was trained for a bit more than 36.5 billion tokens over 69,001 steps on TPU v3-8 pod. The cross-entropy validation loss at the last step was `2.821`.
## Intended Use
Same as the original GPT-J, Slovak GPT-J learns an inner representation of the language that can be used to extract features useful for downstream tasks, however, the intended use is text generation from a prompt.
### How to use
This model along with the tokenizer can be easily loaded using the `AutoModelForCausalLM` functionality:
```python
from transformers import AutoTokenizer, AutoModelForCausalLM
tokenizer = AutoTokenizer.from_pretrained("Milos/slovak-gpt-j-405M")
model = AutoModelForCausalLM.from_pretrained("Milos/slovak-gpt-j-405M")
```
When generating a prompt keep in mind these three things, and you should be good to go:
1. Never leave trailing whitespaces. There's a difference between how the tokenizer encodes "Mám rád slovenčinu" (no space after `slovenčinu`) and "Mám rád slovenčinu " (trailing space after `slovenčinu`), i.e. `[12805, 2872, 46878]` != `[12805, 2872, 46878, 221]`.
2. Always use good ol' US English primary double quotation marks, i.e. `""` instead of `„“`.
3. In case of a new line, always enter `\n\n` instead of a single `\n`.
To illustrate an example of a basic text generation:
```
>>> prompt = "Tradičné jedlo na Orave sú"
>>> encoded_input = tokenizer(prompt, return_tensors='pt')
>>> output = model.generate(**encoded_input)
>>> tokenizer.decode(output[0])
'Tradičné jedlo na Orave sú bryndzové halušky\n\nNa Orave sa v minulosti varilo viac druhov'
```
### Capabilities, Limitations, and Biases
The capability of this particular model is somewhat decent despite its small size totalling 405M parameters. With relative ease it can manage to generate interesting and grammatically correct content.
For example, you can try a few of the following prompts. (For the sake of simplicity, I have omitted all the boilerplate code and swapped `\n` for new lines).
Try generating "How to" articles:
```
PROMPT
Ako napísať pôsobivú esej krok za krokom
OUTPUT
Ako napísať pôsobivú esej krok za krokom
V tomto článku sa dozviete, ako napísať esej, ktorá bude mať v sebe niečo, čo zaujme a bude sa vám páčiť.
V prvom rade si musíte uvedomiť, že esej je vlastne písaný text, ktorý má byť napísaný na počítači.'
```
However, relying on the model to produce factually correct information isn't recommended.
Or try to generate chat conversations:
```
PROMPT
Peter: Ako sa mas?
Lucia: Ale celkom dobre. Co si robil?
Peter:
OUTPUT
Peter: Ako sa mas?
Lucia: Ale celkom dobre. Co si robil?
Peter: No, bol som na chate.
Lucia: A co si tam robil?
Peter: No, bol som tam s kamošmi.
Lucia: A co si tam robil?
Peter: No, bol som tam s kamošmi.
```
Apparently either Peters are more likely to act suspiciously in this particular context or there's a problem with the model. Let's assume that the second explanation may hold some merit. In general, GPT models can (and often will) get into a repeating cycle of generating same content. This is a common problem beyond the scope of this README, however, see [generate's documentation](https://huggingface.co/docs/transformers/master/en/main_classes/model#transformers.generation_utils.GenerationMixin.generate) on how to introduce a frequency/repetition penalty.
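For instance, here is a minimal sketch of discouraging such loops with the standard `generate` arguments, reusing `model`, `tokenizer`, and `encoded_input` from the snippets above (the penalty values are illustrative assumptions, not tuned recommendations):
```python
output = model.generate(
    **encoded_input,
    max_length=64,
    repetition_penalty=1.2,    # down-weight tokens that have already been generated
    no_repeat_ngram_size=3,    # never repeat the same 3-gram verbatim
)
print(tokenizer.decode(output[0]))
```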
Since the dataset contains profanity, politically incorrect language, and (unintentionally) even bits of text in Czech, the model can generate such content to some extent too. Here's an example of the model output when the prompt is in Czech:
```
>>> prompt = "Věta nesmí být sprostá a musí být zcela"
>>> encoded_input = tokenizer(prompt, return_tensors='pt')
>>> output = model.generate(**encoded_input, max_length=16)
>>> tokenizer.decode(output[0])
'Věta nesmí být sprostá a musí být zcela pravdivá.'
```
## Citation and Related Information
This was done as a moonlighting project during summer of 2021 to better understand transformers. I didn't have much free time to open source it properly, so it all sat on my hard drive until now :)
If you use this model or have any questions about it feel free to hit me up at [twitter](https://twitter.com/miloskondela) or check out my [github](https://github.com/kondela) profile.
### BibTeX entry
To cite this model:
```bibtex
@misc{slovak-gpt-j-405m,
author = {Kondela, Milos},
title = {{Slovak GPT-J-405M}},
howpublished = {\url{https://huggingface.co/Milos/slovak-gpt-j-405M}},
year = 2022,
month = February
}
```
To cite the codebase that trained this model:
```bibtex
@misc{mesh-transformer-jax,
author = {Wang, Ben},
title = {{Mesh-Transformer-JAX: Model-Parallel Implementation of Transformer Language Model with JAX}},
howpublished = {\url{https://github.com/kingoflolz/mesh-transformer-jax}},
year = 2021,
month = May
}
```
## Acknowledgements
This project was generously supported by [TPU Research Cloud (TRC) program](https://sites.research.google/trc/about/). Shoutout also goes to [Ben Wang](https://github.com/kingoflolz) and great [EleutherAI community](https://www.eleuther.ai/). | {"language": ["sk"], "license": "gpl-3.0", "tags": ["Slovak GPT-J", "pytorch", "causal-lm"]} | Milos/slovak-gpt-j-405M | null | [
"transformers",
"pytorch",
"gptj",
"text-generation",
"Slovak GPT-J",
"causal-lm",
"sk",
"arxiv:2104.09864",
"license:gpl-3.0",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
text2text-generation | transformers |
# RuT5Tox | {"language": ["ru"], "license": ["apache-2.0"], "tags": ["t5"], "inference": {"parameters": {"num_beams": 5, "no_repeat_ngram_size": 4}}, "widget": [{"text": "\u0427\u0442\u043e \u044d\u0442\u043e \u0437\u0430 \u0435\u0440\u0443\u043d\u0434\u0430?"}]} | IlyaGusev/rut5_tox | null | [
"transformers",
"pytorch",
"t5",
"text2text-generation",
"ru",
"license:apache-2.0",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
null | null | {} | Mindjacker/DialoGPT | null | [
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text2text-generation | transformers | [DialogLM: Pre-trained Model for Long Dialogue Understanding and Summarization](https://arxiv.org/abs/2109.02492).
## Introduction
DialogLED is a pre-trained model for long dialogue understanding and summarization. It builds on the Longformer-Encoder-Decoder (LED) architecture and uses window-based denoising as the pre-training task on a large amount of long dialogue data for further training. This is the base version of DialogLED; the input length is limited to 16,384 tokens in the pre-training phase.
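A minimal loading sketch follows, assuming the checkpoint is used as a standard LED sequence-to-sequence model; the dialogue string and generation settings are illustrative, and the pre-trained checkpoint is intended for further fine-tuning rather than direct summarization:
```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("MingZhong/DialogLED-base-16384")
model = AutoModelForSeq2SeqLM.from_pretrained("MingZhong/DialogLED-base-16384")

# Hypothetical long-dialogue input; real inputs can span up to 16,384 tokens.
dialogue = "#Person1#: How was the meeting? #Person2#: It ran long, but we agreed on the roadmap."
inputs = tokenizer(dialogue, return_tensors="pt", truncation=True)
summary_ids = model.generate(**inputs, max_length=64, num_beams=4)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```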
## Finetuning for Downstream Tasks
Please refer to [our GitHub page](https://github.com/microsoft/DialogLM). | {} | MingZhong/DialogLED-base-16384 | null | [
"transformers",
"pytorch",
"led",
"text2text-generation",
"arxiv:2109.02492",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
text2text-generation | transformers | [DialogLM: Pre-trained Model for Long Dialogue Understanding and Summarization](https://arxiv.org/abs/2109.02492).
## Introduction
DialogLED is a pre-trained model for long dialogue understanding and summarization. It builds on the Longformer-Encoder-Decoder (LED) architecture and uses window-based denoising as the pre-training task on a large amount of long dialogue data for further training. This is the large version of DialogLED; the input length is limited to 5,120 tokens in the pre-training phase.
## Finetuning for Downstream Tasks
Please refer to [our GitHub page](https://github.com/microsoft/DialogLM). | {} | MingZhong/DialogLED-large-5120 | null | [
"transformers",
"pytorch",
"led",
"text2text-generation",
"arxiv:2109.02492",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
text-classification | transformers |
<!-- This model card has been generated automatically according to the information Keras had access to. You should
probably proofread and complete it, then remove this comment. -->
# tmp6tsjsfbf
This model is a fine-tuned version of [bert-base-multilingual-cased](https://huggingface.co/bert-base-multilingual-cased) on an unknown dataset.
It achieves the following results on the evaluation set:
- Train Loss: 0.0178
- Train Sparse Categorical Accuracy: 0.9962
- Epoch: 49
## Model description
This model classifies the title of a piece of content (e.g., a YouTube video, article, or podcast episode) into one of 8 subjects:
0. art
1. personal development
2. world
3. health
4. science
5. business
6. humanities
7. technology.
This model is used to support [Sanderling](https://sanderling.app).
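A minimal inference sketch, assuming the TensorFlow weights are served through the `text-classification` pipeline and that the default `LABEL_0` … `LABEL_7` ids map to the subject indices listed above (the example title is made up):
```python
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="Mingyi/classify_title_subject",
    framework="tf",
)
print(classifier("How to build a REST API with FastAPI"))
```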
## Intended uses & limitations
More information needed
## Training and evaluation data
We used 1.5k labeled titles to train the model. The majority of the training dataset consists of English titles; the rest are Chinese titles.
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- optimizer: {'name': 'Adam', 'learning_rate': 5e-06, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False}
- training_precision: float32
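The optimizer settings above correspond roughly to the following Keras setup (a sketch, assuming a `TFAutoModelForSequenceClassification` head with 8 labels; this is not the original training script):
```python
import tensorflow as tf
from transformers import TFAutoModelForSequenceClassification

model = TFAutoModelForSequenceClassification.from_pretrained(
    "bert-base-multilingual-cased", num_labels=8
)
optimizer = tf.keras.optimizers.Adam(
    learning_rate=5e-6, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=False
)
model.compile(
    optimizer=optimizer,
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=["sparse_categorical_accuracy"],
)
```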
### Training results
| Train Loss | Train Sparse Categorical Accuracy | Epoch |
|:----------:|:---------------------------------:|:-----:|
| 1.8005 | 0.3956 | 0 |
| 1.3302 | 0.5916 | 1 |
| 0.8998 | 0.7575 | 2 |
| 0.6268 | 0.8468 | 3 |
| 0.4239 | 0.9062 | 4 |
| 0.2982 | 0.9414 | 5 |
| 0.2245 | 0.9625 | 6 |
| 0.1678 | 0.9730 | 7 |
| 0.1399 | 0.9745 | 8 |
| 0.1059 | 0.9827 | 9 |
| 0.0822 | 0.9850 | 10 |
| 0.0601 | 0.9902 | 11 |
| 0.0481 | 0.9932 | 12 |
| 0.0386 | 0.9955 | 13 |
| 0.0292 | 0.9977 | 14 |
| 0.0353 | 0.9940 | 15 |
| 0.0336 | 0.9932 | 16 |
| 0.0345 | 0.9910 | 17 |
| 0.0179 | 0.9985 | 18 |
| 0.0150 | 0.9985 | 19 |
| 0.0365 | 0.9895 | 20 |
| 0.0431 | 0.9895 | 21 |
| 0.0243 | 0.9955 | 22 |
| 0.0317 | 0.9925 | 23 |
| 0.0375 | 0.9902 | 24 |
| 0.0138 | 0.9970 | 25 |
| 0.0159 | 0.9977 | 26 |
| 0.0160 | 0.9962 | 27 |
| 0.0151 | 0.9977 | 28 |
| 0.0337 | 0.9902 | 29 |
| 0.0119 | 0.9977 | 30 |
| 0.0165 | 0.9955 | 31 |
| 0.0133 | 0.9977 | 32 |
| 0.0047 | 1.0 | 33 |
| 0.0037 | 1.0 | 34 |
| 0.0033 | 1.0 | 35 |
| 0.0031 | 1.0 | 36 |
| 0.0036 | 1.0 | 37 |
| 0.0343 | 0.9887 | 38 |
| 0.0234 | 0.9962 | 39 |
| 0.0034 | 1.0 | 40 |
| 0.0036 | 1.0 | 41 |
| 0.0261 | 0.9917 | 42 |
| 0.0111 | 0.9970 | 43 |
| 0.0039 | 1.0 | 44 |
| 0.0214 | 0.9932 | 45 |
| 0.0044 | 0.9985 | 46 |
| 0.0122 | 0.9985 | 47 |
| 0.0119 | 0.9962 | 48 |
| 0.0178 | 0.9962 | 49 |
### Framework versions
- Transformers 4.15.0
- TensorFlow 2.7.0
- Tokenizers 0.10.3
| {"license": "apache-2.0", "tags": ["generated_from_keras_callback"], "model-index": [{"name": "tmp6tsjsfbf", "results": []}]} | Mingyi/classify_title_subject | null | [
"transformers",
"tf",
"bert",
"text-classification",
"generated_from_keras_callback",
"license:apache-2.0",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
null | null | {} | Mingyin0312/model_Ming | null | [
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | MinnaLee/gpt2-wikitext2 | null | [
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | MinoS/fine_tuned_bert_fr | null | [
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
token-classification | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-uncased-finetuned-ner
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the conll2003 dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0596
- Precision: 0.9240
- Recall: 0.9378
- F1: 0.9308
- Accuracy: 0.9838
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
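For reference, these settings map roughly onto the following `TrainingArguments` (a sketch, not the exact training script; the output directory is a placeholder):
```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="distilbert-base-uncased-finetuned-ner",  # placeholder path
    learning_rate=2e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    seed=42,
    num_train_epochs=3,
    lr_scheduler_type="linear",  # the Adam betas/epsilon listed above are the optimizer defaults
)
```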
### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| 0.2381 | 1.0 | 878 | 0.0707 | 0.9100 | 0.9240 | 0.9170 | 0.9805 |
| 0.0563 | 2.0 | 1756 | 0.0583 | 0.9246 | 0.9382 | 0.9314 | 0.9835 |
| 0.03 | 3.0 | 2634 | 0.0596 | 0.9240 | 0.9378 | 0.9308 | 0.9838 |
### Framework versions
- Transformers 4.16.2
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.11.0
| {"license": "apache-2.0", "tags": ["generated_from_trainer"], "datasets": ["conll2003"], "metrics": ["precision", "recall", "f1", "accuracy"], "model-index": [{"name": "distilbert-base-uncased-finetuned-ner", "results": [{"task": {"type": "token-classification", "name": "Token Classification"}, "dataset": {"name": "conll2003", "type": "conll2003", "args": "conll2003"}, "metrics": [{"type": "precision", "value": 0.9239501818582607, "name": "Precision"}, {"type": "recall", "value": 0.9378006488421524, "name": "Recall"}, {"type": "f1", "value": 0.9308238951809905, "name": "F1"}, {"type": "accuracy", "value": 0.9837800054013695, "name": "Accuracy"}]}]}]} | Minowa/distilbert-base-uncased-finetuned-ner | null | [
"transformers",
"pytorch",
"tensorboard",
"distilbert",
"token-classification",
"generated_from_trainer",
"dataset:conll2003",
"license:apache-2.0",
"model-index",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
null | null | {"license": "afl-3.0"} | Minzhi/Gradio | null | [
"license:afl-3.0",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | Miojinho/AM | null | [
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | Mirac/test | null | [
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | Mirelle/mt5-small-finetuned-en-to-ro | null | [
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text2text-generation | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# t5-small-finetuned-ro-to-en
This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the wmt16 dataset.
It achieves the following results on the evaluation set:
- Loss: 1.5877
- Bleu: 13.4499
- Gen Len: 17.5073
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len |
|:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:|
| 1.6167 | 0.05 | 2000 | 1.8649 | 9.7029 | 17.5753 |
| 1.4551 | 0.1 | 4000 | 1.7810 | 10.6382 | 17.5358 |
| 1.3723 | 0.16 | 6000 | 1.7369 | 11.1285 | 17.5158 |
| 1.3373 | 0.21 | 8000 | 1.7086 | 11.6173 | 17.5013 |
| 1.2935 | 0.26 | 10000 | 1.6890 | 12.0641 | 17.5038 |
| 1.2632 | 0.31 | 12000 | 1.6670 | 12.3012 | 17.5253 |
| 1.2463 | 0.37 | 14000 | 1.6556 | 12.3991 | 17.5153 |
| 1.2272 | 0.42 | 16000 | 1.6442 | 12.7392 | 17.4732 |
| 1.2052 | 0.47 | 18000 | 1.6328 | 12.8446 | 17.5143 |
| 1.1985 | 0.52 | 20000 | 1.6233 | 13.0892 | 17.4807 |
| 1.1821 | 0.58 | 22000 | 1.6153 | 13.1529 | 17.4952 |
| 1.1791 | 0.63 | 24000 | 1.6079 | 13.2964 | 17.5088 |
| 1.1698 | 0.68 | 26000 | 1.6038 | 13.3548 | 17.4842 |
| 1.154 | 0.73 | 28000 | 1.5957 | 13.3012 | 17.5053 |
| 1.1634 | 0.79 | 30000 | 1.5931 | 13.4203 | 17.5083 |
| 1.1487 | 0.84 | 32000 | 1.5893 | 13.3959 | 17.5123 |
| 1.1495 | 0.89 | 34000 | 1.5875 | 13.3745 | 17.4902 |
| 1.1458 | 0.94 | 36000 | 1.5877 | 13.4129 | 17.5043 |
| 1.1465 | 1.0 | 38000 | 1.5877 | 13.4499 | 17.5073 |
### Framework versions
- Transformers 4.12.5
- Pytorch 1.10.0+cu111
- Datasets 1.16.1
- Tokenizers 0.10.3
| {"license": "apache-2.0", "tags": ["generated_from_trainer"], "datasets": ["wmt16"], "metrics": ["bleu"], "model-index": [{"name": "t5-small-finetuned-ro-to-en", "results": [{"task": {"type": "text2text-generation", "name": "Sequence-to-sequence Language Modeling"}, "dataset": {"name": "wmt16", "type": "wmt16", "args": "ro-en"}, "metrics": [{"type": "bleu", "value": 13.4499, "name": "Bleu"}]}]}]} | Mirelle/t5-small-finetuned-ro-to-en | null | [
"transformers",
"pytorch",
"tensorboard",
"t5",
"text2text-generation",
"generated_from_trainer",
"dataset:wmt16",
"license:apache-2.0",
"model-index",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
null | null | {} | Mirjam/Dutch_canon-finetuned | null | [
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text2text-generation | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# test-finetuned
This model is a fine-tuned version of [yhavinga/t5-v1.1-base-dutch-cnn-test](https://huggingface.co/yhavinga/t5-v1.1-base-dutch-cnn-test) on the None dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 3
- eval_batch_size: 3
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:------:|:-------:|:---------:|:-------:|
| No log | 1.0 | 1 | nan | 33.8462 | 31.746 | 30.7692 | 30.7692 | 86.0 |
### Framework versions
- Transformers 4.15.0
- Pytorch 1.10.1
- Datasets 1.15.1
- Tokenizers 0.10.3
| {"license": "apache-2.0", "tags": ["generated_from_trainer"], "model-index": [{"name": "test-finetuned", "results": []}]} | Mirjam/test-finetuned | null | [
"transformers",
"pytorch",
"tensorboard",
"t5",
"text2text-generation",
"generated_from_trainer",
"license:apache-2.0",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
text-classification | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-uncased-finetuned-cola
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the glue dataset.
It achieves the following results on the evaluation set:
- Loss: 0.7134
- Matthews Correlation: 0.5411
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5
### Training results
| Training Loss | Epoch | Step | Validation Loss | Matthews Correlation |
|:-------------:|:-----:|:----:|:---------------:|:--------------------:|
| 0.5294 | 1.0 | 535 | 0.5082 | 0.4183 |
| 0.3483 | 2.0 | 1070 | 0.4969 | 0.5259 |
| 0.2355 | 3.0 | 1605 | 0.6260 | 0.5065 |
| 0.1733 | 4.0 | 2140 | 0.7134 | 0.5411 |
| 0.1238 | 5.0 | 2675 | 0.8516 | 0.5291 |
### Framework versions
- Transformers 4.12.3
- Pytorch 1.10.0+cu111
- Datasets 1.15.1
- Tokenizers 0.10.3
| {"license": "apache-2.0", "tags": ["generated_from_trainer"], "datasets": ["glue"], "metrics": ["matthews_correlation"], "model-index": [{"name": "distilbert-base-uncased-finetuned-cola", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "glue", "type": "glue", "args": "cola"}, "metrics": [{"type": "matthews_correlation", "value": 0.54109909504615, "name": "Matthews Correlation"}]}]}]} | MisbaHF/distilbert-base-uncased-finetuned-cola | null | [
"transformers",
"pytorch",
"tensorboard",
"distilbert",
"text-classification",
"generated_from_trainer",
"dataset:glue",
"license:apache-2.0",
"model-index",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
null | null | {} | Misiekofski/me | null | [
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
fill-mask | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilroberta-base-testingSB-testingSB
This model is a fine-tuned version of [MistahCase/distilroberta-base-testingSB](https://huggingface.co/MistahCase/distilroberta-base-testingSB) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.9870
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 1.1463 | 1.0 | 1461 | 1.1171 |
| 1.0188 | 2.0 | 2922 | 1.0221 |
| 1.0016 | 3.0 | 4383 | 0.9870 |
### Framework versions
- Transformers 4.20.0
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
| {"license": "apache-2.0", "tags": ["generated_from_trainer"], "model-index": [{"name": "distilroberta-base-testingSB-testingSB", "results": []}]} | MistahCase/distilroberta-base-testingSB-testingSB | null | [
"transformers",
"pytorch",
"tensorboard",
"roberta",
"fill-mask",
"generated_from_trainer",
"license:apache-2.0",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
fill-mask | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilroberta-base-testingSB
This model is a fine-tuned version of [distilroberta-base](https://huggingface.co/distilroberta-base) on a company-specific Danish dataset.
It achieves the following results on the evaluation set:
- Loss: 1.0403
## Model description
Customer-specific model used to embed asset-management work orders in Danish.
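One way to obtain such embeddings is mean pooling over the encoder's last hidden states, as in the sketch below (the pooling strategy and the Danish example sentence are assumptions for illustration, not part of the original setup):
```python
import torch
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("MistahCase/distilroberta-base-testingSB")
model = AutoModel.from_pretrained("MistahCase/distilroberta-base-testingSB")

sentences = ["Udskift pakning på pumpe P-101"]  # hypothetical work-order text
inputs = tokenizer(sentences, padding=True, truncation=True, return_tensors="pt")
with torch.no_grad():
    hidden = model(**inputs).last_hidden_state               # (batch, seq_len, hidden_dim)
mask = inputs["attention_mask"].unsqueeze(-1).float()        # ignore padding tokens
embeddings = (hidden * mask).sum(dim=1) / mask.sum(dim=1)    # mean pooling
print(embeddings.shape)
```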
## Intended uses & limitations
Customer-specific and trained for unsupervised categorization tasks
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0
### Training results
| Epoch | Training Loss | Validation Loss |
|:-----:|:-------------:|:---------------:|
| 1 | 0.988500 | 1.056376 |
| 2 | 0.996300 | 1.027803 |
| 3 | 0.990300 | 1.040270 |
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 0.98850 | 1.0 | 1461 | 1.5211 |
| 1.3179 | 2.0 | 2922 | 1.3314 |
| 1.1931 | 3.0 | 4383 | 1.2530 |
### Framework versions
- Transformers 4.12.5
- Pytorch 1.10.0+cu111
- Datasets 1.15.1
- Tokenizers 0.10.3
| {"license": "apache-2.0", "tags": ["generated_from_trainer"], "model-index": [{"name": "distilroberta-base-testingSB", "results": []}]} | MistahCase/distilroberta-base-testingSB | null | [
"transformers",
"pytorch",
"tensorboard",
"roberta",
"fill-mask",
"generated_from_trainer",
"license:apache-2.0",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
text-generation | transformers | {} | MisterFavourite/Genesis_KJV_fine_tuned | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text-generation | transformers | {} | MisterFavourite/Sherlock_Holmes_fine_tuned | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text-classification | transformers | {} | MoaazZaki/machathonmodel | null | [
"transformers",
"pytorch",
"bert",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text-classification | transformers |
# Model Description
This model fine-tunes the bert-base model on the CoLA dataset.
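A minimal usage sketch (the checkpoint name is taken from this repository; the CoLA label mapping of 0 = unacceptable, 1 = acceptable is an assumption):

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification

checkpoint = "Modfiededition/bert-fine-tuned-cola"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = TFAutoModelForSequenceClassification.from_pretrained(checkpoint)

inputs = tokenizer("The books was on the table.", return_tensors="tf")
logits = model(**inputs).logits
predicted_class = int(tf.math.argmax(logits, axis=-1)[0])  # assumed: 0 = unacceptable, 1 = acceptable
print(predicted_class)
```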
| {"language": "en", "license": "mit", "tags": ["sequence classification"], "datasets": ["cola"]} | Modfiededition/bert-fine-tuned-cola | null | [
"transformers",
"tf",
"bert",
"text-classification",
"sequence classification",
"en",
"dataset:cola",
"license:mit",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
text-classification | transformers | {} | Modfiededition/dummy-model | null | [
"transformers",
"tf",
"bert",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
question-answering | transformers | {} | Modfiededition/roberta-fine-tuned-tweet-sentiment-extractor | null | [
"transformers",
"tf",
"roberta",
"question-answering",
"endpoints_compatible",
"has_space",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text2text-generation | transformers | ## t5-base-fine-tuned-on-jfleg
T5-base model fine-tuned on the [**JFLEG dataset**](https://huggingface.co/datasets/jfleg) with the objective of **text2text-generation**.
# Model Description:
T5 is an encoder-decoder model pre-trained with a multi-task mixture of unsupervised and supervised tasks and for which each task is converted into a text-to-text format.
T5 works well on a variety of tasks out-of-the-box by prepending a different prefix to the input corresponding to each task, e.g., for translation: "translate English to German: …"; for summarization: "summarize: …".
The T5 model was presented in [**Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer**](https://arxiv.org/pdf/1910.10683.pdf) by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu.
## Pre-Processing:
For this grammar-correction task, we prepend the prefix "grammar: " to each of the input sentences.
```
grammar: Your Sentence
```
## How to use:
You can use this model directly with a text2text-generation pipeline to detect and correct grammatical mistakes.
```
from transformers import pipeline
model_checkpoint = "Modfiededition/t5-base-fine-tuned-on-jfleg"
model = pipeline("text2text-generation", model=model_checkpoint)
text = "I am write on AI"
output = model(text)
```
Result(s)
```
I am writing on AI.
```
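As described in the pre-processing section, the training inputs carry a prefix; a variant of the call above with the prefix applied explicitly (whether the prefix is strictly required at inference time is an assumption) would look like:

```python
from transformers import pipeline

model_checkpoint = "Modfiededition/t5-base-fine-tuned-on-jfleg"
corrector = pipeline("text2text-generation", model=model_checkpoint)

text = "grammar: I am write on AI"  # prefix prepended as in the pre-processing section
print(corrector(text)[0]["generated_text"])
```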
| {} | Modfiededition/t5-base-fine-tuned-on-jfleg | null | [
"transformers",
"tf",
"t5",
"text2text-generation",
"arxiv:1910.10683",
"autotrain_compatible",
"endpoints_compatible",
"has_space",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
text-generation | transformers |
# Okabe Rintaro DialoGPT Model | {"tags": ["conversational"]} | ModzabazeR/small-okaberintaro | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"conversational",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
text-generation | transformers | {} | MoeZilla/Chatbot | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | MoeZilla/Itel-chatbot | null | [
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | Mofe/first_model_repo | null | [
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
automatic-speech-recognition | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
#
This model is a fine-tuned version of [hf-test/xls-r-dummy](https://huggingface.co/hf-test/xls-r-dummy) on the MOZILLA-FOUNDATION/COMMON_VOICE_7_0 - AB dataset.
It achieves the following results on the evaluation set:
- Loss: 207.6065
- Wer: 1.5484
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0003
- train_batch_size: 2
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- training_steps: 10
- mixed_precision_training: Native AMP
### Training results
### Framework versions
- Transformers 4.17.0.dev0
- Pytorch 1.10.2+cu113
- Datasets 1.18.4.dev0
- Tokenizers 0.11.0
| {"language": ["ab"], "tags": ["automatic-speech-recognition", "mozilla-foundation/common_voice_7_0", "generated_from_trainer"], "datasets": ["common_voice"], "model-index": [{"name": "", "results": []}]} | Mofe/speech-sprint-test | null | [
"transformers",
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"mozilla-foundation/common_voice_7_0",
"generated_from_trainer",
"ab",
"dataset:common_voice",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
automatic-speech-recognition | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
#
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - HA dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4998
- Wer: 0.5153
## Model description
More information needed
## Intended uses & limitations
More information needed
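A minimal inference sketch (the audio path is a placeholder; the recording is assumed to be 16 kHz mono, matching Common Voice):

```python
from transformers import pipeline

asr = pipeline("automatic-speech-recognition", model="Mofe/xls-r-hausa-40")
result = asr("sample_hausa_16khz.wav")  # placeholder path to a 16 kHz mono recording
print(result["text"])
```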
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 9.6e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 2000
- num_epochs: 80.0
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 3.0021 | 8.33 | 500 | 2.9059 | 1.0 |
| 2.6604 | 16.66 | 1000 | 2.6402 | 0.9892 |
| 1.2216 | 24.99 | 1500 | 0.6051 | 0.6851 |
| 1.0754 | 33.33 | 2000 | 0.5408 | 0.6464 |
| 0.9582 | 41.66 | 2500 | 0.5521 | 0.5935 |
| 0.8653 | 49.99 | 3000 | 0.5156 | 0.5550 |
| 0.7867 | 58.33 | 3500 | 0.5439 | 0.5606 |
| 0.7265 | 66.66 | 4000 | 0.4863 | 0.5255 |
| 0.6699 | 74.99 | 4500 | 0.5050 | 0.5169 |
### Framework versions
- Transformers 4.17.0.dev0
- Pytorch 1.10.2+cu113
- Datasets 1.18.4.dev0
- Tokenizers 0.11.0
| {"language": ["ha"], "license": "apache-2.0", "tags": ["automatic-speech-recognition", "mozilla-foundation/common_voice_8_0", "generated_from_trainer", "robust-speech-event", "hf-asr-leaderboard"], "datasets": ["mozilla-foundation/common_voice_8_0"], "model-index": [{"name": "", "results": [{"task": {"type": "automatic-speech-recognition", "name": "Automatic Speech Recognition"}, "dataset": {"name": "Common Voice 8.0", "type": "mozilla-foundation/common_voice_8_0", "args": "ha"}, "metrics": [{"type": "wer", "value": 51.31, "name": "Test WER"}]}]}]} | Mofe/xls-r-hausa-40 | null | [
"transformers",
"pytorch",
"wav2vec2",
"automatic-speech-recognition",
"mozilla-foundation/common_voice_8_0",
"generated_from_trainer",
"robust-speech-event",
"hf-asr-leaderboard",
"ha",
"dataset:mozilla-foundation/common_voice_8_0",
"license:apache-2.0",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
token-classification | spacy | | Feature | Description |
| --- | --- |
| **Name** | `en_pipeline` |
| **Version** | `0.0.0` |
| **spaCy** | `>=3.1.0,<3.2.0` |
| **Default Pipeline** | `tok2vec`, `tagger`, `parser`, `ner`, `attribute_ruler`, `lemmatizer` |
| **Components** | `tok2vec`, `tagger`, `parser`, `ner`, `attribute_ruler`, `lemmatizer` |
| **Vectors** | 0 keys, 0 unique vectors (0 dimensions) |
| **Sources** | n/a |
| **License** | n/a |
| **Author** | [n/a]() |
### Label Scheme
<details>
<summary>View label scheme (114 labels for 3 components)</summary>
| Component | Labels |
| --- | --- |
| **`tagger`** | `$`, `''`, `,`, `-LRB-`, `-RRB-`, `.`, `:`, `ADD`, `AFX`, `CC`, `CD`, `DT`, `EX`, `FW`, `HYPH`, `IN`, `JJ`, `JJR`, `JJS`, `LS`, `MD`, `NFP`, `NN`, `NNP`, `NNPS`, `NNS`, `PDT`, `POS`, `PRP`, `PRP$`, `RB`, `RBR`, `RBS`, `RP`, `SYM`, `TO`, `UH`, `VB`, `VBD`, `VBG`, `VBN`, `VBP`, `VBZ`, `WDT`, `WP`, `WP$`, `WRB`, `XX`, ```` |
| **`parser`** | `ROOT`, `acl`, `acomp`, `advcl`, `advmod`, `agent`, `amod`, `appos`, `attr`, `aux`, `auxpass`, `case`, `cc`, `ccomp`, `compound`, `conj`, `csubj`, `csubjpass`, `dative`, `dep`, `det`, `dobj`, `expl`, `intj`, `mark`, `meta`, `neg`, `nmod`, `npadvmod`, `nsubj`, `nsubjpass`, `nummod`, `oprd`, `parataxis`, `pcomp`, `pobj`, `poss`, `preconj`, `predet`, `prep`, `prt`, `punct`, `quantmod`, `relcl`, `xcomp` |
| **`ner`** | `ARC`, `AST`, `BOOK`, `CAUSAL`, `COMPARISON`, `DATE`, `HEM`, `HOUR`, `HYPO`, `INSTRUMENT`, `JUDGEMENT`, `LAWS`, `MODEL`, `NAME`, `Observation`, `PAR`, `PLACE`, `QUANTITY`, `REASON`, `ZOD` |
</details>
### Accuracy
| Type | Score |
| --- | --- |
| `TAG_ACC` | 0.00 |
| `DEP_UAS` | 0.00 |
| `DEP_LAS` | 0.00 |
| `DEP_LAS_PER_TYPE` | 0.00 |
| `SENTS_P` | 100.00 |
| `SENTS_R` | 100.00 |
| `SENTS_F` | 100.00 |
| `ENTS_F` | 99.32 |
| `ENTS_P` | 99.47 |
| `ENTS_R` | 99.17 |
| `LEMMA_ACC` | 0.00 |
| `NER_LOSS` | 7790.09 |
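A minimal usage sketch (assumes the packaged pipeline has been installed locally so that `en_pipeline` is loadable; the example sentence is made up):

```python
import spacy

nlp = spacy.load("en_pipeline")  # assumes the packaged wheel is installed

doc = nlp("The observation was recorded at the third hour, as the almanac laws require.")
for ent in doc.ents:
    # Entity labels come from the custom NER scheme above (e.g. HOUR, LAWS, Observation)
    print(ent.text, ent.label_)
```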
| {"language": ["en"], "tags": ["spacy", "token-classification"]} | MohaAM/en_pipeline | null | [
"spacy",
"token-classification",
"en",
"model-index",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
null | null | {} | MohamedDanis/DialoGPT-small-harrypotter | null | [
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
null | null | utyuiue6 | {} | MohamedH/object | null | [
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
null | null | {} | MohamedH/test11 | null | [
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text2text-generation | transformers | {} | MohamedHesham/bart_base_subset_cnn | null | [
"transformers",
"pytorch",
"bart",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text2text-generation | transformers | {} | MohamedHesham/bart_large_subset_cnn | null | [
"transformers",
"pytorch",
"bart",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
summarization | transformers | {"tags": ["summarization"], "datasets": ["CNN/Daily-mail"], "metrics": ["ROUGE"]} | MohamedZaitoon/T5-CNN | null | [
"transformers",
"pytorch",
"summarization",
"dataset:CNN/Daily-mail",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
summarization | transformers | {"tags": ["summarization"], "datasets": ["CNN/Daily-mail"], "metrics": ["ROUGE"]} | MohamedZaitoon/bart-fine-tune | null | [
"transformers",
"pytorch",
"jax",
"bart",
"text2text-generation",
"summarization",
"dataset:CNN/Daily-mail",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text-classification | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# bertweet-finetuned-rbam
This model is a fine-tuned version of [vinai/bertweet-base](https://huggingface.co/vinai/bertweet-base) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 1.3971
- F1: 0.6620
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5
### Training results
| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.7138 | 1.0 | 1632 | 0.7529 | 0.6814 |
| 0.5692 | 2.0 | 3264 | 0.8473 | 0.6803 |
| 0.4126 | 3.0 | 4896 | 1.0029 | 0.6617 |
| 0.2854 | 4.0 | 6528 | 1.2167 | 0.6635 |
| 0.2007 | 5.0 | 8160 | 1.3971 | 0.6620 |
### Framework versions
- Transformers 4.16.2
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.11.0
| {"tags": ["generated_from_trainer"], "metrics": ["f1"], "model-index": [{"name": "bertweet-finetuned-rbam", "results": []}]} | MohammadABH/bertweet-finetuned-rbam | null | [
"transformers",
"pytorch",
"roberta",
"text-classification",
"generated_from_trainer",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
text-classification | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# twitter-roberta-base-dec2021_rbam_fine_tuned
This model is a fine-tuned version of [cardiffnlp/twitter-roberta-base-dec2021](https://huggingface.co/cardiffnlp/twitter-roberta-base-dec2021) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.8295
- Accuracy: 0.6777
- Precision: 0.6743
- Recall: 0.6777
- F1: 0.6753
## Model description
More information needed
## Intended uses & limitations
More information needed
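A minimal usage sketch (the input text is made up, and the label names returned depend on the model's configuration):

```python
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="MohammadABH/twitter-roberta-base-dec2021_rbam_fine_tuned",
)
print(classifier("This argument completely misses the point."))  # hypothetical input
```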
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:------:|
| 0.8455 | 1.0 | 3264 | 0.7663 | 0.6661 | 0.6802 | 0.6661 | 0.6693 |
| 0.6421 | 2.0 | 6528 | 0.8295 | 0.6777 | 0.6743 | 0.6777 | 0.6753 |
### Framework versions
- Transformers 4.17.0
- Pytorch 1.10.0+cu111
- Datasets 2.0.0
- Tokenizers 0.11.6
| {"tags": ["generated_from_trainer"], "metrics": ["accuracy", "precision", "recall", "f1"], "model-index": [{"name": "twitter-roberta-base-dec2021_rbam_fine_tuned", "results": []}]} | MohammadABH/twitter-roberta-base-dec2021_rbam_fine_tuned | null | [
"transformers",
"pytorch",
"roberta",
"text-classification",
"generated_from_trainer",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
null | null | {} | MohammadBasheer/t5-small-finetuned-xsum | null | [
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text-generation | transformers |
# Harry Potter DialoGPT Model | {"tags": ["conversational"]} | Mohsin272/DialoGPT-medium-harrypotter | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"conversational",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
null | null | {} | Mohta/DialoGPT-medium-harrypotter11 | null | [
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | Mohta/DialoGPT-small-harrypotter | null | [
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text-generation | transformers |
Famous-quote inference model (名言推論モデル)
| {"language": ["ja"]} | Momerio/meigen_generate_Japanese | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"ja",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
text-generation | transformers |
# Harry Potter DialoGPT Model | {"tags": ["conversational"]} | Mona/DialoGPT-small-harrypotter | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"conversational",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
null | null | {} | Monerok/gerger | null | [
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | MonetCH/distilbert-base-uncased-finetuned-cola | null | [
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
text-classification | transformers |
# Model Trained Using AutoNLP
- Problem type: Multi-class Classification
- Model ID: 23044997
- CO2 Emissions (in grams): 4.819872182577655
## Validation Metrics
- Loss: 0.001594889909029007
- Accuracy: 0.9997478885667465
- Macro F1: 0.9991190902836993
- Micro F1: 0.9997478885667465
- Weighted F1: 0.9997476735518704
- Macro Precision: 0.9998014460161265
- Micro Precision: 0.9997478885667465
- Weighted Precision: 0.9997479944069787
- Macro Recall: 0.9984426545713851
- Micro Recall: 0.9997478885667465
- Weighted Recall: 0.9997478885667465
## Usage
You can use cURL to access this model:
```
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoNLP"}' https://api-inference.huggingface.co/models/Monsia/autonlp-tweets-classification-23044997
```
Or Python API:
```
from transformers import AutoModelForSequenceClassification, AutoTokenizer
model = AutoModelForSequenceClassification.from_pretrained("Monsia/autonlp-tweets-classification-23044997", use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained("Monsia/autonlp-tweets-classification-23044997", use_auth_token=True)
inputs = tokenizer("I love AutoNLP", return_tensors="pt")
outputs = model(**inputs)
``` | {"language": "en", "tags": "autonlp", "datasets": ["Monsia/autonlp-data-tweets-classification"], "widget": [{"text": "I love AutoNLP \ud83e\udd17"}], "co2_eq_emissions": 4.819872182577655} | Monsia/autonlp-tweets-classification-23044997 | null | [
"transformers",
"pytorch",
"distilbert",
"text-classification",
"autonlp",
"en",
"dataset:Monsia/autonlp-data-tweets-classification",
"co2_eq_emissions",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
text-classification | transformers |
# camembert-fr-covid-tweet-classification
This model is a fine-tuned checkpoint of [Yanzhu/bertweetfr-base](https://huggingface.co/Yanzhu/bertweetfr-base), fine-tuned on SST-2.
This model reaches an accuracy of 66.00% on the dev set.
In this dataset, given a tweet, the goal was to infer its underlying topic by choosing from the following topic classes:
- chiffres: the tweet talks about COVID statistics.
- mesures: the tweet talks about government measures against COVID.
- opinions: the tweet talks about people's opinions, e.g. fake news.
- symptomes: the tweet talks about COVID symptoms or variants.
- divers: anything else.
# Pipelining the Model
```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
tokenizer = AutoTokenizer.from_pretrained("Monsia/camembert-fr-covid-tweet-classification")
model = AutoModelForSequenceClassification.from_pretrained("Monsia/camembert-fr-covid-tweet-classification")
nlp_topic_classif = pipeline("text-classification", model=model, tokenizer=tokenizer)
nlp_topic_classif("tchai on est morts. on va se faire vacciner et ils vont contrôler comme les marionnettes avec des fils. d'après les '' ont dit ''...")
# Output: [{'label': 'opinions', 'score': 0.831}]
```
| {"language": ["fr"], "license": "apache-2.0", "tags": ["classification"], "metrics": ["accuracy"], "widget": [{"text": "tchai on est morts. on va se faire vacciner et ils vont contr\u00f4ler comme les marionnettes avec des fils. d'apr\u00e8s les 'ont dit'..."}]} | Monsia/camembert-fr-covid-tweet-classification | null | [
"transformers",
"pytorch",
"camembert",
"text-classification",
"classification",
"fr",
"license:apache-2.0",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
text-classification | transformers | # camembert-fr-covid-tweet-sentiment-classification
This model is a fine-tuned checkpoint of [Yanzhu/bertweetfr-base](https://huggingface.co/Yanzhu/bertweetfr-base), fine-tuned on SST-2.
This model reaches an accuracy of 71% on the dev set.
In this dataset, given a tweet, the goal was to infer its underlying sentiment by choosing from three classes:
- 0: negatif (negative)
- 1: neutre (neutral)
- 2: positif (positive)
# Pipelining the Model
```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
tokenizer = AutoTokenizer.from_pretrained("data354/camembert-fr-covid-tweet-sentiment-classification")
model = AutoModelForSequenceClassification.from_pretrained("data354/camembert-fr-covid-tweet-sentiment-classification")
nlp_topic_classif = pipeline("text-classification", model=model, tokenizer=tokenizer)
nlp_topic_classif("tchai on est morts. on va se faire vacciner et ils vont contrôler comme les marionnettes avec des fils. d'après les '' ont dit ''...")
# Output: [{'label': 'opinions', 'score': 0.831}]
``` | {"language": ["fr"], "license": "apache-2.0", "tags": ["classification"], "metrics": ["accuracy"], "widget": [{"text": "tchai on est morts. on va se faire vacciner et ils vont contr\u00f4ler comme les marionnettes avec des fils. d'apr\u00e8s les 'ont dit'..."}]} | data354/camembert-fr-covid-tweet-sentiment-classification | null | [
"transformers",
"pytorch",
"camembert",
"text-classification",
"classification",
"fr",
"license:apache-2.0",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
automatic-speech-recognition | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# test-model-lg-data
This model is a fine-tuned version of [Monsia/test-model-lg-data](https://huggingface.co/Monsia/test-model-lg-data) on the common_voice dataset.
It achieves the following results on the evaluation set:
- Loss: 0.3354
- Wer: 0.4150
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0003
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 200
- num_epochs: 5
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.0236 | 0.67 | 100 | 0.4048 | 0.4222 |
| 0.0304 | 1.35 | 200 | 0.4266 | 0.4809 |
| 0.0545 | 2.03 | 300 | 0.4309 | 0.4735 |
| 0.0415 | 2.7 | 400 | 0.4269 | 0.4595 |
| 0.033 | 3.38 | 500 | 0.4085 | 0.4537 |
| 0.0328 | 4.05 | 600 | 0.3642 | 0.4224 |
| 0.0414 | 4.73 | 700 | 0.3354 | 0.4150 |
### Framework versions
- Transformers 4.11.3
- Pytorch 1.10.0+cu113
- Datasets 1.13.3
- Tokenizers 0.10.3
| {"license": "apache-2.0", "tags": ["generated_from_trainer"], "datasets": ["common_voice"], "model-index": [{"name": "test-model-lg-data", "results": []}]} | Monsia/test-model-lg-data | null | [
"transformers",
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"generated_from_trainer",
"dataset:common_voice",
"license:apache-2.0",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
null | null | {} | Monsia/test-model | null | [
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | Monsia/test-wav2vec2-asr | null | [
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | Moo/distilbert-base-uncased-finetuned-squad | null | [
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | Mood/distilbert-base-uncased-finetuned-conn | null | [
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |
|
token-classification | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-uncased-finetuned-ner
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
### Framework versions
- Transformers 4.12.5
- Pytorch 1.10.0+cu111
- Datasets 1.15.1
- Tokenizers 0.10.3
| {"license": "apache-2.0", "tags": ["generated_from_trainer"], "model-index": [{"name": "distilbert-base-uncased-finetuned-ner", "results": []}]} | Mood/distilbert-base-uncased-finetuned-ner | null | [
"transformers",
"pytorch",
"tensorboard",
"distilbert",
"token-classification",
"generated_from_trainer",
"license:apache-2.0",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:04+00:00 |