pipeline_tag (stringclasses, 48 values) | library_name (stringclasses, 205 values) | text (stringlengths 0-18.3M) | metadata (stringlengths 2-1.07B) | id (stringlengths 5-122) | last_modified (null) | tags (listlengths 1-1.84k) | sha (null) | created_at (stringlengths 25)
---|---|---|---|---|---|---|---|---
text-classification | transformers | {} | TehranNLP-org/xlnet-base-cased-avg-mnli | null | [
"transformers",
"pytorch",
"xlnet",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text-classification | transformers | {} | TehranNLP-org/xlnet-base-cased-avg-sst2-2e-5-21 | null | [
"transformers",
"pytorch",
"xlnet",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text-classification | transformers | {} | TehranNLP-org/xlnet-base-cased-avg-sst2-2e-5-42 | null | [
"transformers",
"pytorch",
"xlnet",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text-classification | transformers | {} | TehranNLP-org/xlnet-base-cased-avg-sst2-2e-5-63 | null | [
"transformers",
"pytorch",
"xlnet",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text-classification | transformers | Product Review Sentiment Classification
1. Label0 - Negative
2. Label1 - Positive
Trained so far on 20,000 balanced positive and negative reviews | {} | Tejas003/distillbert_base_uncased_amazon_review_sentiment_300 | null | [
"transformers",
"tf",
"distilbert",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text-classification | transformers | {} | Tejas3/Xlnet_base_80 | null | [
"transformers",
"pytorch",
"xlnet",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text-classification | transformers | {} | Tejas3/distillbert_110_uncased_movie_genre | null | [
"transformers",
"pytorch",
"distilbert",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text-classification | transformers | {} | Tejas3/distillbert_110_uncased_v1 | null | [
"transformers",
"pytorch",
"distilbert",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text-classification | transformers | {} | Tejas3/distillbert_base_uncased_80 | null | [
"transformers",
"pytorch",
"distilbert",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text-classification | transformers | {} | Tejas3/distillbert_base_uncased_80_all | null | [
"transformers",
"pytorch",
"distilbert",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text-classification | transformers | {} | Tejas3/distillbert_base_uncased_80_equal | null | [
"transformers",
"pytorch",
"distilbert",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text-classification | transformers | {} | Tejas3/distillbert_base_uncased_amazon_food_review_300 | null | [
"transformers",
"tf",
"distilbert",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text-generation | transformers | {"tags": ["conversational"]} | Tejasvb/DialoGPT-small-rick | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"conversational",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text-generation | null | {"tags": ["conversational"]} | Tejasvb/DialogGPT-small-rick | null | [
"conversational",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | Tejaswini/m2m100_1.2B-finetuned-es-to-en | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | Tejaswini/mbart-large-50-finetuned-es-to-en | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text2text-generation | transformers | {} | Tejaswini/opus-mt-en-ro-finetuned-en-to-ro | null | [
"transformers",
"pytorch",
"tensorboard",
"marian",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
fill-mask | transformers | {} | Temur/qartvelian-roberta-base | null | [
"transformers",
"jax",
"tensorboard",
"roberta",
"fill-mask",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
automatic-speech-recognition | transformers |
# Wav2Vec2-Large-XLSR-53-Georgian
Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Georgian using the [Common Voice](https://huggingface.co/datasets/common_voice) dataset.
When using this model, make sure that your speech input is sampled at 16kHz.
## Usage
The model can be used directly (without a language model) as follows:
```python
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
test_dataset = load_dataset("common_voice", "ka", split="test[:2%]")
processor = Wav2Vec2Processor.from_pretrained("Temur/wav2vec2-Georgian-Daytona")
model = Wav2Vec2ForCTC.from_pretrained("Temur/wav2vec2-Georgian-Daytona")
resampler = torchaudio.transforms.Resample(48_000, 16_000)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
predicted_ids = torch.argmax(logits, dim=-1)
print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
```
## Evaluation
The model can be evaluated as follows on the Georgian test data of Common Voice.
```python
import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re
test_dataset = load_dataset("common_voice", "ka", split="test")
wer = load_metric("wer")
processor = Wav2Vec2Processor.from_pretrained("Temur/wav2vec2-Georgian-Daytona")
model = Wav2Vec2ForCTC.from_pretrained("Temur/wav2vec2-Georgian-Daytona")
model.to("cuda")
chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“]' # TODO: adapt this list to include all special characters you removed from the data
resampler = torchaudio.transforms.Resample(48_000, 16_000)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
# Run batched inference on the test set and decode the predictions
def evaluate(batch):
    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
    pred_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(pred_ids)
    return batch
result = test_dataset.map(evaluate, batched=True, batch_size=8)
print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```
**Test Result**: 48.34 %
## Training
The Common Voice `train`, `validation`, and ... datasets were used for training as well as ... and ... # TODO: adapt to state all the datasets that were used for training.
The script used for training can be found [here](https://github.com/huggingface/transformers/blob/master/examples/research_projects/wav2vec2/FINE_TUNE_XLSR_WAV2VEC2.md) | {"language": "ka", "license": "apache-2.0", "tags": ["audio", "automatic-speech-recognition", "speech", "xlsr-fine-tuning-week"], "datasets": ["common_voice"], "metrics": ["wer"], "model-index": [{"name": "Georgian WAV2VEC2 Daytona", "results": [{"task": {"type": "automatic-speech-recognition", "name": "Speech Recognition"}, "dataset": {"name": "Common Voice ka", "type": "common_voice", "args": "ka"}, "metrics": [{"type": "wer", "value": 48.34, "name": "Test WER"}]}]}]} | Temur/wav2vec2-Georgian-Daytona | null | [
"transformers",
"pytorch",
"jax",
"wav2vec2",
"automatic-speech-recognition",
"audio",
"speech",
"xlsr-fine-tuning-week",
"ka",
"dataset:common_voice",
"license:apache-2.0",
"model-index",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
null | null | # GFPGAN (CVPR 2021)
[**Paper**](https://arxiv.org/abs/2101.04061) **|** [**Project Page**](https://xinntao.github.io/projects/gfpgan)    [English](README.md) **|** [简体中文](README_CN.md)
GitHub: https://github.com/TencentARC/GFPGAN
GFPGAN is a blind face restoration algorithm towards real-world face images.
<a href="https://colab.research.google.com/drive/1sVsoBd9AjckIXThgtZhGrHRfFI6UUYOo"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="google colab logo"></a>
[Colab Demo](https://colab.research.google.com/drive/1sVsoBd9AjckIXThgtZhGrHRfFI6UUYOo)
### :book: GFP-GAN: Towards Real-World Blind Face Restoration with Generative Facial Prior
> [[Paper](https://arxiv.org/abs/2101.04061)]   [[Project Page](https://xinntao.github.io/projects/gfpgan)]   [Demo] <br>
> [Xintao Wang](https://xinntao.github.io/), [Yu Li](https://yu-li.github.io/), [Honglun Zhang](https://scholar.google.com/citations?hl=en&user=KjQLROoAAAAJ), [Ying Shan](https://scholar.google.com/citations?user=4oXBp9UAAAAJ&hl=en) <br>
> Applied Research Center (ARC), Tencent PCG
#### Abstract
Blind face restoration usually relies on facial priors, such as facial geometry prior or reference prior, to restore realistic and faithful details. However, very low-quality inputs cannot offer accurate geometric prior while high-quality references are inaccessible, limiting the applicability in real-world scenarios. In this work, we propose GFP-GAN that leverages **rich and diverse priors encapsulated in a pretrained face GAN** for blind face restoration. This Generative Facial Prior (GFP) is incorporated into the face restoration process via novel channel-split spatial feature transform layers, which allow our method to achieve a good balance of realness and fidelity. Thanks to the powerful generative facial prior and delicate designs, our GFP-GAN could jointly restore facial details and enhance colors with just a single forward pass, while GAN inversion methods require expensive image-specific optimization at inference. Extensive experiments show that our method achieves superior performance to prior art on both synthetic and real-world datasets.
#### BibTeX
@InProceedings{wang2021gfpgan,
author = {Xintao Wang and Yu Li and Honglun Zhang and Ying Shan},
title = {Towards Real-World Blind Face Restoration with Generative Facial Prior},
booktitle={The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
year = {2021}
}
<p align="center">
<img src="https://xinntao.github.io/projects/GFPGAN_src/gfpgan_teaser.jpg">
</p>
---
## :wrench: Dependencies and Installation
- Python >= 3.7 (Recommend to use [Anaconda](https://www.anaconda.com/download/#linux) or [Miniconda](https://docs.conda.io/en/latest/miniconda.html))
- [PyTorch >= 1.7](https://pytorch.org/)
- NVIDIA GPU + [CUDA](https://developer.nvidia.com/cuda-downloads)
### Installation
1. Clone repo
```bash
git clone https://github.com/xinntao/GFPGAN.git
cd GFPGAN
```
1. Install dependent packages
```bash
# Install basicsr - https://github.com/xinntao/BasicSR
# We use BasicSR for both training and inference
# Set BASICSR_EXT=True to compile the cuda extensions in the BasicSR - It may take several minutes to compile, please be patient
BASICSR_EXT=True pip install basicsr
# Install facexlib - https://github.com/xinntao/facexlib
# We use face detection and face restoration helper in the facexlib package
pip install facexlib
pip install -r requirements.txt
```
## :zap: Quick Inference
Download pre-trained models: [GFPGANv1.pth](https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/GFPGANv1.pth)
```bash
wget https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/GFPGANv1.pth -P experiments/pretrained_models
```
```bash
python inference_gfpgan_full.py --model_path experiments/pretrained_models/GFPGANv1.pth --test_path inputs/whole_imgs
# for aligned images
python inference_gfpgan_full.py --model_path experiments/pretrained_models/GFPGANv1.pth --test_path inputs/cropped_faces --aligned
```
## :computer: Training
We provide complete training codes for GFPGAN. <br>
You could improve it according to your own needs.
1. Dataset preparation: [FFHQ](https://github.com/NVlabs/ffhq-dataset)
1. Download pre-trained models and other data. Put them in the `experiments/pretrained_models` folder.
1. [Pretrained StyleGAN2 model: StyleGAN2_512_Cmul1_FFHQ_B12G4_scratch_800k.pth](https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/StyleGAN2_512_Cmul1_FFHQ_B12G4_scratch_800k.pth)
1. [Component locations of FFHQ: FFHQ_eye_mouth_landmarks_512.pth](https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/FFHQ_eye_mouth_landmarks_512.pth)
1. [A simple ArcFace model: arcface_resnet18.pth](https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/arcface_resnet18.pth)
1. Modify the configuration file `train_gfpgan_v1.yml` accordingly.
1. Training
> python -m torch.distributed.launch --nproc_per_node=4 --master_port=22021 train.py -opt train_gfpgan_v1.yml --launcher pytorch
## :scroll: License and Acknowledgement
GFPGAN is released under Apache License Version 2.0.
## :e-mail: Contact
If you have any questions, please email `[email protected]` or `[email protected]`.
| {} | TencentARC/GFPGANv1 | null | [
"arxiv:2101.04061",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
null | null | {} | Tender/distilbert-base-uncased-finetuned-cola | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | Teng/distilbert-base-uncased-finetuned-squad | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
fill-mask | transformers | {} | TerenceAmil/bert_cn_finetuning | null | [
"transformers",
"pytorch",
"bert",
"fill-mask",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | TerenceAmil/bert_funting_test | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text-generation | transformers |
Note: **default code snippet above won't work** because we are using `AlbertTokenizer` with `GPT2LMHeadModel`, see [issue](https://github.com/huggingface/transformers/issues/4285).
## GPT2 124M Trained on Ukrainian Fiction
### Training details
The model was trained on a corpus of 4,040 fiction books, 2.77 GiB in total.
Evaluation on [brown-uk](https://github.com/brown-uk/corpus) gives perplexity of 50.16.
### Example usage:
```python
from transformers import AlbertTokenizer, GPT2LMHeadModel
tokenizer = AlbertTokenizer.from_pretrained("Tereveni-AI/gpt2-124M-uk-fiction")
model = GPT2LMHeadModel.from_pretrained("Tereveni-AI/gpt2-124M-uk-fiction")
input_ids = tokenizer.encode("Но зла Юнона, суча дочка,", add_special_tokens=False, return_tensors='pt')
outputs = model.generate(
input_ids,
do_sample=True,
num_return_sequences=3,
max_length=50
)
for i, out in enumerate(outputs):
print("{}: {}".format(i, tokenizer.decode(out)))
```
Prints something like this:
```bash
0: Но зла Юнона, суча дочка, яка затьмарила всі її таємниці: І хто з'їсть її душу, той помре». І, не дочекавшись гніву богів, посунула в пітьму, щоб не бачити перед собою. Але, за
1: Но зла Юнона, суча дочка, і довела мене до божевілля. Але він не знав нічого. Після того як я його побачив, мені стало зле. Я втратив рівновагу. Але в мене не було часу на роздуми. Я вже втратив надію
2: Но зла Юнона, суча дочка, не нарікала нам! — раптом вигукнула Юнона. — Це ти, старий йолопе! — мовила вона, не перестаючи сміятись. — Хіба ти не знаєш, що мені подобається ходити з тобою?
``` | {"language": "uk", "tags": ["text-generation"], "widget": [{"text": "\u041d\u043e \u0437\u043b\u0430 \u042e\u043d\u043e\u043d\u0430, \u0441\u0443\u0447\u0430 \u0434\u043e\u0447\u043a\u0430, "}]} | Tereveni-AI/gpt2-124M-uk-fiction | null | [
"transformers",
"pytorch",
"jax",
"gpt2",
"text-generation",
"uk",
"endpoints_compatible",
"has_space",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
null | null | {} | Terrenc3/Batman_Bot | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | Tester/distilbert-base-uncased-finetuned-squad | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text-generation | transformers | {} | ThaiUWA/gpt-2-josh-uwa | null | [
"transformers",
"pytorch",
"tf",
"jax",
"gpt2",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text-generation | transformers | {} | ThaiUWA/gpt2test | null | [
"transformers",
"pytorch",
"jax",
"gpt2",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
feature-extraction | transformers | {} | ThaiUWA/py_just_rumour | null | [
"transformers",
"pytorch",
"jax",
"gpt2",
"feature-extraction",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {"license": "afl-3.0"} | Thais/hf_nlp | null | [
"license:afl-3.0",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
automatic-speech-recognition | transformers |
# Wav2Vec2-Large-XLSR-53-Tamil
Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Tamil using the [Common Voice](https://huggingface.co/datasets/common_voice) dataset.
When using this model, make sure that your speech input is sampled at 16kHz.
## Usage
The model can be used directly (without a language model) as follows:
```python
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
test_dataset = load_dataset("common_voice", "ta", split="test[:2%]")  # "ta" is the Common Voice language code for Tamil
processor = Wav2Vec2Processor.from_pretrained("Thanish/wav2vec2-large-xlsr-tamil")
model = Wav2Vec2ForCTC.from_pretrained("Thanish/wav2vec2-large-xlsr-tamil")
resampler = torchaudio.transforms.Resample(48_000, 16_000)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
predicted_ids = torch.argmax(logits, dim=-1)
print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
```
## Evaluation
The model can be evaluated as follows on the Tamil test data of Common Voice.
```python
import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re
test_dataset = load_dataset("common_voice", "ta", split="test")  # "ta" is the Common Voice language code for Tamil
wer = load_metric("wer")
processor = Wav2Vec2Processor.from_pretrained("Thanish/wav2vec2-large-xlsr-tamil")
model = Wav2Vec2ForCTC.from_pretrained("Thanish/wav2vec2-large-xlsr-tamil")
model.to("cuda")
chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“]' # TODO: adapt this list to include all special characters you removed from the data
resampler = torchaudio.transforms.Resample(48_000, 16_000)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
\\tbatch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
\\tspeech_array, sampling_rate = torchaudio.load(batch["path"])
\\tbatch["speech"] = resampler(speech_array).squeeze().numpy()
\\treturn batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
# Run batched inference on the test set and decode the predictions
def evaluate(batch):
    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
    pred_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(pred_ids)
    return batch
result = test_dataset.map(evaluate, batched=True, batch_size=8)
print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```
**Test Result**: 100.00 %
## Training
The Common Voice `train` and `validation` splits were used for training.
The script used for training can be found [here](https://colab.research.google.com/drive/1PC2SjxpcWMQ2qmRw21NbP38wtQQUa5os#scrollTo=YKBZdqqJG9Tv) | {"language": "ta", "license": "apache-2.0", "tags": ["audio", "automatic-speech-recognition", "speech", "xlsr-fine-tuning-week"], "datasets": ["common_voice"], "metrics": ["wer"], "model-index": [{"name": "thanish wav2vec2-large-xlsr-tamil", "results": [{"task": {"type": "automatic-speech-recognition", "name": "Speech Recognition"}, "dataset": {"name": "Common Voice ta", "type": "common_voice", "args": "ta"}, "metrics": [{"type": "wer", "value": 100.0, "name": "Test WER"}]}]}]} | Thanish/wav2vec2-large-xlsr-tamil | null | [
"transformers",
"pytorch",
"jax",
"wav2vec2",
"automatic-speech-recognition",
"audio",
"speech",
"xlsr-fine-tuning-week",
"ta",
"dataset:common_voice",
"license:apache-2.0",
"model-index",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
null | null | {} | Thargyi/Arcane | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | ThatBomberDev/AITests | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | ThatSkyFox/DialoGPT-large-joshuaBot | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text-generation | transformers |
This is an improved version of the Joshua bot
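A minimal chat sketch (an illustration only: it assumes the standard DialoGPT generate-with-history interface and uses the repo id from this row's metadata):
```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("ThatSkyFox/DialoGPT-medium-joshua")
model = AutoModelForCausalLM.from_pretrained("ThatSkyFox/DialoGPT-medium-joshua")

chat_history_ids = None
for step in range(3):
    # encode the user turn and append the end-of-string token
    new_ids = tokenizer.encode(input(">> You: ") + tokenizer.eos_token, return_tensors="pt")
    # keep the running conversation as context
    bot_input_ids = new_ids if chat_history_ids is None else torch.cat([chat_history_ids, new_ids], dim=-1)
    chat_history_ids = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)
    print("Joshua:", tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True))
```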
| {"tags": ["conversational"]} | ThatSkyFox/DialoGPT-medium-joshua | null | [
"transformers",
"gpt2",
"text-generation",
"conversational",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text-generation | transformers |
# This is a chatbot trained on the transcript of the game "The World Ends with You" | {"tags": ["conversational"]} | ThatSkyFox/DialoGPT-small-joshua | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"conversational",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
null | null | {} | ThatSkyFox/JoshuaBot | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | ThatZoeGamer/tzgchatbot2.0 | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | Thatianexsilva/That | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | Thayyy/Thayyy | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | Thazren/Thazzy | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text-classification | transformers | {} | The-Data-Hound/bacteria_lamp_network | null | [
"transformers",
"pytorch",
"distilbert",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | The-Programmer-With-Cool-Pens/DialoGPT-small-tifa | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text-generation | transformers |
# Tifa DialoGPT Model | {"tags": ["conversational"]} | The-Programmer-With-Cool-Pens/TifaBotAIPackage | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"conversational",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
null | null | {} | TheArticle/wav2vec2-large-xls-r-300m-turkish-colab | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text-generation | transformers | ruGPT3-small model, trained on some 2chan posts
| {} | TheBakerCat/2chan_ruGPT3_small | null | [
"transformers",
"pytorch",
"jax",
"gpt2",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
null | null | {} | TheBronxTexan/first_test | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text-generation | transformers |
# Joshua | {"tags": ["conversational"]} | TheCatsMoo/DialoGGPT-small-joshua | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"conversational",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
null | null | {} | TheDarkLord/DialoGpt-small-tony | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | TheDiamondKing/DialoGPT-Small-HarryPotterBot | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | TheDiamondKing/DialoGPT-med-rick-and-morty-v3 | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | TheDiamondKing/DialoGPT-med-rick-and-morty | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | TheDiamondKing/DialoGPT-medium-rick-and-morty | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text-generation | transformers |
# A Talking AI made with GPT2, trained on Harry Potter transcripts
## Currently working on text-to-speech and speech recognition
## Likes to say "i'm not a wizard" | {"tags": ["conversational"]} | TheDiamondKing/DialoGPT-small-harrypotter | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"conversational",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
null | null | {} | TheDiamondKing/DialoGPT-small-rick-and-morty | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | TheFarGG/ChuckAI | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | TheHolyTachanka/DialoGPT-small-rick | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | TheHolyTachanka/rickbot | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
feature-extraction | transformers | {} | TheLongSentance/MIMIC-III-t5-large-v1 | null | [
"transformers",
"pytorch",
"t5",
"feature-extraction",
"endpoints_compatible",
"has_space",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text2text-generation | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# t5-small-finetuned-toxic
This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1295
- Rouge1: 93.7659
- Rouge2: 3.6618
- Rougel: 93.7652
- Rougelsum: 93.7757
- Gen Len: 2.5481
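As a rough illustration, the checkpoint could be queried like this (a hypothetical sketch: it assumes the model is published under the id shown in this row and emits a short label-like target, consistent with the ~2.5-token generation length above):
```python
from transformers import pipeline

# hypothetical usage; the checkpoint id is taken from this row's metadata
toxicity = pipeline("text2text-generation", model="TheLongSentance/t5-small-finetuned-toxic")
print(toxicity("you are a wonderful person")[0]["generated_text"])
```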
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:------:|:-------:|:---------:|:-------:|
| 0.1595 | 1.0 | 7979 | 0.1295 | 93.7659 | 3.6618 | 93.7652 | 93.7757 | 2.5481 |
### Framework versions
- Transformers 4.9.1
- Pytorch 1.9.0+cu102
- Datasets 1.11.0
- Tokenizers 0.10.3
| {"license": "apache-2.0", "tags": ["generated_from_trainer"], "metrics": ["rouge"], "model_index": [{"name": "t5-small-finetuned-toxic", "results": [{"task": {"name": "Sequence-to-sequence Language Modeling", "type": "text2text-generation"}, "metric": {"name": "Rouge1", "type": "rouge", "value": 93.7659}}]}]} | TheLongSentance/t5-small-finetuned-toxic | null | [
"transformers",
"pytorch",
"tensorboard",
"t5",
"text2text-generation",
"generated_from_trainer",
"license:apache-2.0",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# t5-small-finetuned-xsum
This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the xsum dataset.
It achieves the following results on the evaluation set:
- Loss: 2.3833
- Rouge1: 29.6452
- Rouge2: 8.6953
- Rougel: 23.4474
- Rougelsum: 23.4553
- Gen Len: 18.8037
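A minimal inference sketch (a hypothetical example; it assumes the checkpoint is available under the id shown in this row):
```python
from transformers import pipeline

summarizer = pipeline("summarization", model="TheLongSentance/t5-small-finetuned-xsum")
article = "The full text of a news article goes here ..."  # placeholder input
print(summarizer(article, max_length=60, min_length=10, do_sample=False)[0]["summary_text"])
```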
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len |
|:-------------:|:-----:|:------:|:---------------:|:-------:|:------:|:-------:|:---------:|:-------:|
| 2.6051 | 1.0 | 102023 | 2.3833 | 29.6452 | 8.6953 | 23.4474 | 23.4553 | 18.8037 |
### Framework versions
- Transformers 4.9.0
- Pytorch 1.9.0+cu102
- Datasets 1.10.2
- Tokenizers 0.10.3
| {"license": "apache-2.0", "tags": ["generated_from_trainer"], "datasets": ["xsum"], "metrics": ["rouge"], "model_index": [{"name": "t5-small-finetuned-xsum", "results": [{"task": {"name": "Sequence-to-sequence Language Modeling", "type": "text2text-generation"}, "dataset": {"name": "xsum", "type": "xsum", "args": "default"}, "metric": {"name": "Rouge1", "type": "rouge", "value": 29.6452}}]}]} | TheLongSentance/t5-small-finetuned-xsum | null | [
"transformers",
"pytorch",
"tensorboard",
"t5",
"text2text-generation",
"generated_from_trainer",
"dataset:xsum",
"license:apache-2.0",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text2text-generation | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# t5_large_baseline
This model is a fine-tuned version of [t5-large](https://huggingface.co/t5-large) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0010
- Rouge1: 99.8958
- Rouge2: 99.8696
- Rougel: 99.8958
- Rougelsum: 99.8958
- Gen Len: 46.715
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 4
- eval_batch_size: 4
- seed: 42
- optimizer: Adafactor
- lr_scheduler_type: linear
- num_epochs: 3.0
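The hyperparameters above map roughly onto a `Seq2SeqTrainingArguments` configuration like the following (a sketch only, not the exact training script; the output directory is an assumption):
```python
from transformers import Seq2SeqTrainingArguments

training_args = Seq2SeqTrainingArguments(
    output_dir="t5_large_baseline",  # hypothetical output directory
    learning_rate=5e-5,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    seed=42,
    adafactor=True,                  # Adafactor optimizer, as listed above
    lr_scheduler_type="linear",
    num_train_epochs=3.0,
    predict_with_generate=True,      # generate during evaluation so ROUGE can be computed
)
```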
### Training results
| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:|
| 0.9852 | 0.33 | 50 | 0.1098 | 55.1421 | 49.8248 | 54.4294 | 54.7377 | 19.0 |
| 0.1186 | 0.67 | 100 | 0.0176 | 58.0994 | 54.8973 | 57.7383 | 57.9538 | 19.0 |
| 0.0417 | 1.0 | 150 | 0.0057 | 58.3685 | 55.7353 | 58.279 | 58.2729 | 19.0 |
| 0.0225 | 1.33 | 200 | 0.0029 | 58.8981 | 56.2457 | 58.8202 | 58.7906 | 19.0 |
| 0.0131 | 1.67 | 250 | 0.0024 | 58.8439 | 56.2535 | 58.7557 | 58.7218 | 19.0 |
| 0.0112 | 2.0 | 300 | 0.0013 | 58.9538 | 56.4749 | 58.9322 | 58.8817 | 19.0 |
| 0.0077 | 2.33 | 350 | 0.0013 | 58.9538 | 56.4749 | 58.9322 | 58.8817 | 19.0 |
| 0.0043 | 2.67 | 400 | 0.0010 | 59.0124 | 56.5806 | 58.9867 | 58.9342 | 19.0 |
| 0.0052 | 3.0 | 450 | 0.0010 | 59.0402 | 56.6982 | 59.0385 | 58.986 | 19.0 |
### Framework versions
- Transformers 4.10.0.dev0
- Pytorch 1.9.0+cu111
- Datasets 1.11.0
- Tokenizers 0.10.3
| {"license": "apache-2.0", "tags": ["generated_from_trainer"], "metrics": ["rouge"], "model_index": [{"name": "t5_large_baseline", "results": [{"task": {"name": "Summarization", "type": "summarization"}, "metric": {"name": "Rouge1", "type": "rouge", "value": 99.8958}}]}]} | TheLongSentance/t5_large_baseline | null | [
"transformers",
"pytorch",
"t5",
"text2text-generation",
"generated_from_trainer",
"license:apache-2.0",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
feature-extraction | transformers | {} | TheLongSentance/t5_mimic_final_chkpnt10000 | null | [
"transformers",
"pytorch",
"t5",
"feature-extraction",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
feature-extraction | transformers | {} | TheLongSentance/t5_mimic_final_chkpnt15000 | null | [
"transformers",
"pytorch",
"t5",
"feature-extraction",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
feature-extraction | transformers | {} | TheLongSentance/t5_mimic_final_chkpnt150000 | null | [
"transformers",
"pytorch",
"t5",
"feature-extraction",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
feature-extraction | transformers | {} | TheLongSentance/t5_mimic_final_chkpnt20000 | null | [
"transformers",
"pytorch",
"t5",
"feature-extraction",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
feature-extraction | transformers | {} | TheLongSentance/t5_mimic_final_chkpnt225000 | null | [
"transformers",
"pytorch",
"t5",
"feature-extraction",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
feature-extraction | transformers | {} | TheLongSentance/t5_mimic_final_chkpnt25000 | null | [
"transformers",
"pytorch",
"t5",
"feature-extraction",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
feature-extraction | transformers | {} | TheLongSentance/t5_mimic_final_chkpnt30000 | null | [
"transformers",
"pytorch",
"t5",
"feature-extraction",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
feature-extraction | transformers | {} | TheLongSentance/t5_mimic_final_chkpnt5000 | null | [
"transformers",
"pytorch",
"t5",
"feature-extraction",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
feature-extraction | transformers | {} | TheLongSentance/t5_mimic_final_chkpnt75000 | null | [
"transformers",
"pytorch",
"t5",
"feature-extraction",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
feature-extraction | transformers | {} | TheLongSentance/t5_mimic_nt1_1m_tk200_r2p5_c15_sp1_1_nbn_lr1e4c_chkpnt20000 | null | [
"transformers",
"pytorch",
"t5",
"feature-extraction",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
feature-extraction | transformers | {} | TheLongSentance/t5_mimic_nt1_1m_tk200_r2p5_c15_sp1_1_nbn_lr3e4c_chkpnt20000 | null | [
"transformers",
"pytorch",
"t5",
"feature-extraction",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
feature-extraction | transformers | {} | TheLongSentance/t5_mimic_nt1_1m_tk200_r2p5_c15_sp1_3_nbn_chkpnt20000 | null | [
"transformers",
"pytorch",
"t5",
"feature-extraction",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
feature-extraction | transformers | {} | TheLongSentance/t5_mimic_nt1_1m_tk200_r2p5_c15_sp1_3_nbn_chkpnt5000 | null | [
"transformers",
"pytorch",
"t5",
"feature-extraction",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
feature-extraction | transformers | {} | TheLongSentance/t5_mimic_nt1_1m_tk200_r2p5_c15_sp1_3_nbn_lr3e4c | null | [
"transformers",
"pytorch",
"t5",
"feature-extraction",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
null | null | {} | TheLongSentance/tst-multilabelclass | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text-generation | transformers |
# Harry DialoGPT Model | {"tags": ["conversational"]} | ThePeachOx/DialoGPT-small-harry | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"conversational",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
fill-mask | transformers | EconBERTa - RoBERTa further trained for 25k steps (T=512, batch_size = 256) on text sourced from economics books.
Example usage for MLM:
```python
from transformers import RobertaTokenizer, RobertaForMaskedLM
from transformers import pipeline
tokenizer = RobertaTokenizer.from_pretrained('roberta-base')  # EconBERTa reuses the roberta-base vocabulary
model = RobertaForMaskedLM.from_pretrained('models').cpu()  # 'models' is a local checkpoint directory; this row's Hub id is ThePixOne/EconBERTa
model.eval()
mlm = pipeline('fill-mask', model = model, tokenizer = tokenizer)
test = "ECB - euro, FED - <mask>, BoJ - yen"
print(mlm(test)[:2])
[{'sequence': 'ECB - euro, FED - dollar, BoJ - yen',
'score': 0.7342271208763123,
'token': 1404,
'token_str': ' dollar'},
{'sequence': 'ECB - euro, FED - dollars, BoJ - yen',
'score': 0.10828445851802826,
'token': 1932,
'token_str': ' dollars'}]
```
| {} | ThePixOne/EconBERTa | null | [
"transformers",
"pytorch",
"roberta",
"fill-mask",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
null | null | {} | ThePixOne/chaqt | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
fill-mask | transformers | BERT finetuned on wallstreetbets subreddit | {} | ThePixOne/retBERT | null | [
"transformers",
"pytorch",
"bert",
"fill-mask",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text-generation | null |
# Rick DialoGPT Model | {"tags": ["conversational"]} | TheReverendWes/DialoGPT-small-rick | null | [
"conversational",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
null | null | {} | TheSkrill/ChatBot-CS-583 | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text-generation | transformers |
# Hermione Chat Bot | {"tags": ["conversational"]} | TheTUFGuy/HermioneChatBot | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"conversational",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
null | null | {} | Theivaprakasham/Speech2Text-base-timit-demo-colab | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text-classification | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# bert-base-cased-twitter_sentiment
This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6907
- Accuracy: 0.7132
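A minimal way to query the classifier (a sketch; the model id comes from this row's metadata, and the label names depend on the training label mapping):
```python
from transformers import pipeline

sentiment = pipeline("text-classification", model="Theivaprakasham/bert-base-cased-twitter_sentiment")
# returns a list like [{"label": ..., "score": ...}]; label ids depend on how the training labels were mapped
print(sentiment("Loving the new update, works great!"))
```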
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 1e-06
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 10
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:-----:|:---------------:|:--------:|
| 0.8901 | 1.0 | 1387 | 0.8592 | 0.6249 |
| 0.8085 | 2.0 | 2774 | 0.7600 | 0.6822 |
| 0.7336 | 3.0 | 4161 | 0.7170 | 0.6915 |
| 0.6938 | 4.0 | 5548 | 0.7018 | 0.7016 |
| 0.6738 | 5.0 | 6935 | 0.6926 | 0.7067 |
| 0.6496 | 6.0 | 8322 | 0.6910 | 0.7088 |
| 0.6599 | 7.0 | 9709 | 0.6902 | 0.7088 |
| 0.631 | 8.0 | 11096 | 0.6910 | 0.7095 |
| 0.6327 | 9.0 | 12483 | 0.6925 | 0.7146 |
| 0.6305 | 10.0 | 13870 | 0.6907 | 0.7132 |
### Framework versions
- Transformers 4.12.5
- Pytorch 1.10.0+cu111
- Datasets 1.16.1
- Tokenizers 0.10.3
| {"license": "apache-2.0", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "model-index": [{"name": "bert-base-cased-twitter_sentiment", "results": []}]} | Theivaprakasham/bert-base-cased-twitter_sentiment | null | [
"transformers",
"pytorch",
"tensorboard",
"bert",
"text-classification",
"generated_from_trainer",
"license:apache-2.0",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
null | null | {} | Theivaprakasham/bert-base-cased-twitter_sentimenttest_trainer | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
token-classification | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# layoutlmv2-finetuned-sroie
This model is a fine-tuned version of [microsoft/layoutlmv2-base-uncased](https://huggingface.co/microsoft/layoutlmv2-base-uncased) on the sroie dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0291
- Address Precision: 0.9341
- Address Recall: 0.9395
- Address F1: 0.9368
- Address Number: 347
- Company Precision: 0.9570
- Company Recall: 0.9625
- Company F1: 0.9598
- Company Number: 347
- Date Precision: 0.9885
- Date Recall: 0.9885
- Date F1: 0.9885
- Date Number: 347
- Total Precision: 0.9253
- Total Recall: 0.9280
- Total F1: 0.9266
- Total Number: 347
- Overall Precision: 0.9512
- Overall Recall: 0.9546
- Overall F1: 0.9529
- Overall Accuracy: 0.9961
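For reference, a receipt could be tagged with a processor/model pair along these lines (a sketch under assumptions: the checkpoint id is taken from this row, OCR is delegated to the processor via Tesseract, LayoutLMv2 additionally requires `detectron2`, and the image path is a placeholder):
```python
import torch
from PIL import Image
from transformers import LayoutLMv2Processor, LayoutLMv2ForTokenClassification

processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased")  # apply_ocr=True by default (needs pytesseract)
model = LayoutLMv2ForTokenClassification.from_pretrained("Theivaprakasham/layoutlmv2-finetuned-sroie")

image = Image.open("receipt.png").convert("RGB")  # hypothetical input scan
encoding = processor(image, return_tensors="pt", truncation=True)
with torch.no_grad():
    outputs = model(**encoding)

predictions = outputs.logits.argmax(-1).squeeze().tolist()
tokens = processor.tokenizer.convert_ids_to_tokens(encoding.input_ids.squeeze().tolist())
for token, pred in zip(tokens, predictions):
    print(token, model.config.id2label[pred])
```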
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- training_steps: 3000
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Address Precision | Address Recall | Address F1 | Address Number | Company Precision | Company Recall | Company F1 | Company Number | Date Precision | Date Recall | Date F1 | Date Number | Total Precision | Total Recall | Total F1 | Total Number | Overall Precision | Overall Recall | Overall F1 | Overall Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:-----------------:|:--------------:|:----------:|:--------------:|:-----------------:|:--------------:|:----------:|:--------------:|:--------------:|:-----------:|:-------:|:-----------:|:---------------:|:------------:|:--------:|:------------:|:-----------------:|:--------------:|:----------:|:----------------:|
| No log | 0.05 | 157 | 0.8162 | 0.3670 | 0.7233 | 0.4869 | 347 | 0.0617 | 0.0144 | 0.0234 | 347 | 0.0 | 0.0 | 0.0 | 347 | 0.0 | 0.0 | 0.0 | 347 | 0.3346 | 0.1844 | 0.2378 | 0.9342 |
| No log | 1.05 | 314 | 0.3490 | 0.8564 | 0.8934 | 0.8745 | 347 | 0.8610 | 0.9280 | 0.8932 | 347 | 0.7297 | 0.8559 | 0.7878 | 347 | 0.0 | 0.0 | 0.0 | 347 | 0.8128 | 0.6693 | 0.7341 | 0.9826 |
| No log | 2.05 | 471 | 0.1845 | 0.7970 | 0.9049 | 0.8475 | 347 | 0.9211 | 0.9424 | 0.9316 | 347 | 0.9885 | 0.9885 | 0.9885 | 347 | 0.0 | 0.0 | 0.0 | 347 | 0.8978 | 0.7089 | 0.7923 | 0.9835 |
| 0.7027 | 3.05 | 628 | 0.1194 | 0.9040 | 0.9222 | 0.9130 | 347 | 0.8880 | 0.9135 | 0.9006 | 347 | 0.9885 | 0.9885 | 0.9885 | 347 | 0.0 | 0.0 | 0.0 | 347 | 0.9263 | 0.7061 | 0.8013 | 0.9853 |
| 0.7027 | 4.05 | 785 | 0.0762 | 0.9397 | 0.9424 | 0.9410 | 347 | 0.8889 | 0.9222 | 0.9052 | 347 | 0.9885 | 0.9885 | 0.9885 | 347 | 0.7740 | 0.9078 | 0.8355 | 347 | 0.8926 | 0.9402 | 0.9158 | 0.9928 |
| 0.7027 | 5.05 | 942 | 0.0564 | 0.9282 | 0.9308 | 0.9295 | 347 | 0.9296 | 0.9510 | 0.9402 | 347 | 0.9885 | 0.9885 | 0.9885 | 347 | 0.7801 | 0.8588 | 0.8176 | 347 | 0.9036 | 0.9323 | 0.9177 | 0.9946 |
| 0.0935 | 6.05 | 1099 | 0.0548 | 0.9222 | 0.9222 | 0.9222 | 347 | 0.6975 | 0.7378 | 0.7171 | 347 | 0.9885 | 0.9885 | 0.9885 | 347 | 0.8608 | 0.8732 | 0.8670 | 347 | 0.8648 | 0.8804 | 0.8725 | 0.9921 |
| 0.0935 | 7.05 | 1256 | 0.0410 | 0.92 | 0.9280 | 0.9240 | 347 | 0.9486 | 0.9568 | 0.9527 | 347 | 0.9885 | 0.9885 | 0.9885 | 347 | 0.9091 | 0.9222 | 0.9156 | 347 | 0.9414 | 0.9488 | 0.9451 | 0.9961 |
| 0.0935 | 8.05 | 1413 | 0.0369 | 0.9368 | 0.9395 | 0.9381 | 347 | 0.9569 | 0.9597 | 0.9583 | 347 | 0.9772 | 0.9885 | 0.9828 | 347 | 0.9143 | 0.9222 | 0.9182 | 347 | 0.9463 | 0.9524 | 0.9494 | 0.9960 |
| 0.038 | 9.05 | 1570 | 0.0343 | 0.9282 | 0.9308 | 0.9295 | 347 | 0.9624 | 0.9597 | 0.9610 | 347 | 0.9885 | 0.9885 | 0.9885 | 347 | 0.9206 | 0.9020 | 0.9112 | 347 | 0.9500 | 0.9452 | 0.9476 | 0.9958 |
| 0.038 | 10.05 | 1727 | 0.0317 | 0.9395 | 0.9395 | 0.9395 | 347 | 0.9598 | 0.9625 | 0.9612 | 347 | 0.9885 | 0.9885 | 0.9885 | 347 | 0.9280 | 0.9280 | 0.9280 | 347 | 0.9539 | 0.9546 | 0.9543 | 0.9963 |
| 0.038 | 11.05 | 1884 | 0.0312 | 0.9368 | 0.9395 | 0.9381 | 347 | 0.9514 | 0.9597 | 0.9555 | 347 | 0.9885 | 0.9885 | 0.9885 | 347 | 0.9226 | 0.9280 | 0.9253 | 347 | 0.9498 | 0.9539 | 0.9518 | 0.9960 |
| 0.0236 | 12.05 | 2041 | 0.0318 | 0.9368 | 0.9395 | 0.9381 | 347 | 0.9570 | 0.9625 | 0.9598 | 347 | 0.9885 | 0.9885 | 0.9885 | 347 | 0.9043 | 0.8991 | 0.9017 | 347 | 0.9467 | 0.9474 | 0.9471 | 0.9956 |
| 0.0236 | 13.05 | 2198 | 0.0291 | 0.9337 | 0.9337 | 0.9337 | 347 | 0.9598 | 0.9625 | 0.9612 | 347 | 0.9885 | 0.9885 | 0.9885 | 347 | 0.9164 | 0.9164 | 0.9164 | 347 | 0.9496 | 0.9503 | 0.9499 | 0.9960 |
| 0.0236 | 14.05 | 2355 | 0.0300 | 0.9286 | 0.9366 | 0.9326 | 347 | 0.9459 | 0.9568 | 0.9513 | 347 | 0.9885 | 0.9885 | 0.9885 | 347 | 0.9275 | 0.9222 | 0.9249 | 347 | 0.9476 | 0.9510 | 0.9493 | 0.9959 |
| 0.0178 | 15.05 | 2512 | 0.0307 | 0.9366 | 0.9366 | 0.9366 | 347 | 0.9513 | 0.9568 | 0.9540 | 347 | 0.9885 | 0.9885 | 0.9885 | 347 | 0.9275 | 0.9222 | 0.9249 | 347 | 0.9510 | 0.9510 | 0.9510 | 0.9959 |
| 0.0178 | 16.05 | 2669 | 0.0300 | 0.9312 | 0.9366 | 0.9339 | 347 | 0.9543 | 0.9625 | 0.9584 | 347 | 0.9885 | 0.9885 | 0.9885 | 347 | 0.9171 | 0.9251 | 0.9211 | 347 | 0.9477 | 0.9532 | 0.9504 | 0.9959 |
| 0.0178 | 17.05 | 2826 | 0.0292 | 0.9368 | 0.9395 | 0.9381 | 347 | 0.9570 | 0.9625 | 0.9598 | 347 | 0.9885 | 0.9885 | 0.9885 | 347 | 0.9253 | 0.9280 | 0.9266 | 347 | 0.9519 | 0.9546 | 0.9532 | 0.9961 |
| 0.0178 | 18.05 | 2983 | 0.0291 | 0.9341 | 0.9395 | 0.9368 | 347 | 0.9570 | 0.9625 | 0.9598 | 347 | 0.9885 | 0.9885 | 0.9885 | 347 | 0.9253 | 0.9280 | 0.9266 | 347 | 0.9512 | 0.9546 | 0.9529 | 0.9961 |
| 0.0149 | 19.01 | 3000 | 0.0291 | 0.9341 | 0.9395 | 0.9368 | 347 | 0.9570 | 0.9625 | 0.9598 | 347 | 0.9885 | 0.9885 | 0.9885 | 347 | 0.9253 | 0.9280 | 0.9266 | 347 | 0.9512 | 0.9546 | 0.9529 | 0.9961 |
### Framework versions
- Transformers 4.16.2
- Pytorch 1.8.0+cu101
- Datasets 1.18.4.dev0
- Tokenizers 0.11.6
| {"license": "cc-by-nc-sa-4.0", "tags": ["generated_from_trainer"], "datasets": ["sroie"], "model-index": [{"name": "layoutlmv2-finetuned-sroie", "results": []}]} | Theivaprakasham/layoutlmv2-finetuned-sroie | null | [
"transformers",
"pytorch",
"tensorboard",
"layoutlmv2",
"token-classification",
"generated_from_trainer",
"dataset:sroie",
"license:cc-by-nc-sa-4.0",
"autotrain_compatible",
"endpoints_compatible",
"has_space",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
token-classification | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# layoutlmv2-finetuned-sroie_mod
This model is a fine-tuned version of [microsoft/layoutlmv2-base-uncased](https://huggingface.co/microsoft/layoutlmv2-base-uncased) on an unknown dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- training_steps: 3000
- mixed_precision_training: Native AMP
### Training results
### Framework versions
- Transformers 4.16.2
- Pytorch 1.8.0+cu101
- Datasets 1.18.3
- Tokenizers 0.11.0
| {"license": "cc-by-nc-sa-4.0", "tags": ["generated_from_trainer"], "model-index": [{"name": "layoutlmv2-finetuned-sroie_mod", "results": []}]} | Theivaprakasham/layoutlmv2-finetuned-sroie_mod | null | [
"transformers",
"pytorch",
"tensorboard",
"layoutlmv2",
"token-classification",
"generated_from_trainer",
"license:cc-by-nc-sa-4.0",
"autotrain_compatible",
"endpoints_compatible",
"has_space",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text-classification | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# sentence-transformers-msmarco-distilbert-base-tas-b-twitter_sentiment
This model is a fine-tuned version of [sentence-transformers/msmarco-distilbert-base-tas-b](https://huggingface.co/sentence-transformers/msmarco-distilbert-base-tas-b) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6954
- Accuracy: 0.7146
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 1e-06
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 20
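These settings correspond roughly to a `TrainingArguments` configuration like the following (a sketch of the listed values only; the output directory and evaluation strategy are assumptions):
```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="msmarco-distilbert-twitter-sentiment",  # hypothetical
    learning_rate=1e-6,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    seed=42,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    num_train_epochs=20,
    evaluation_strategy="epoch",  # assumption: the table below reports one validation pass per epoch
)
```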
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:-----:|:---------------:|:--------:|
| 0.8892 | 1.0 | 1387 | 0.8472 | 0.6180 |
| 0.7965 | 2.0 | 2774 | 0.7797 | 0.6609 |
| 0.7459 | 3.0 | 4161 | 0.7326 | 0.6872 |
| 0.7096 | 4.0 | 5548 | 0.7133 | 0.6995 |
| 0.6853 | 5.0 | 6935 | 0.6998 | 0.7002 |
| 0.6561 | 6.0 | 8322 | 0.6949 | 0.7059 |
| 0.663 | 7.0 | 9709 | 0.6956 | 0.7077 |
| 0.6352 | 8.0 | 11096 | 0.6890 | 0.7164 |
| 0.6205 | 9.0 | 12483 | 0.6888 | 0.7117 |
| 0.6203 | 10.0 | 13870 | 0.6871 | 0.7121 |
| 0.6005 | 11.0 | 15257 | 0.6879 | 0.7171 |
| 0.5985 | 12.0 | 16644 | 0.6870 | 0.7139 |
| 0.5839 | 13.0 | 18031 | 0.6882 | 0.7164 |
| 0.5861 | 14.0 | 19418 | 0.6910 | 0.7124 |
| 0.5732 | 15.0 | 20805 | 0.6916 | 0.7153 |
| 0.5797 | 16.0 | 22192 | 0.6947 | 0.7110 |
| 0.5565 | 17.0 | 23579 | 0.6930 | 0.7175 |
| 0.5636 | 18.0 | 24966 | 0.6959 | 0.7106 |
| 0.5642 | 19.0 | 26353 | 0.6952 | 0.7132 |
| 0.5717 | 20.0 | 27740 | 0.6954 | 0.7146 |
### Framework versions
- Transformers 4.13.0.dev0
- Pytorch 1.10.0+cu111
- Datasets 1.16.1
- Tokenizers 0.10.3
| {"license": "apache-2.0", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "model-index": [{"name": "sentence-transformers-msmarco-distilbert-base-tas-b-twitter_sentiment", "results": []}]} | Theivaprakasham/sentence-transformers-msmarco-distilbert-base-tas-b-twitter_sentiment | null | [
"transformers",
"pytorch",
"tensorboard",
"distilbert",
"text-classification",
"generated_from_trainer",
"license:apache-2.0",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text-classification | transformers | {} | Theivaprakasham/sentence-transformers-paraphrase-MiniLM-L6-v2-twitter_sentiment | null | [
"transformers",
"pytorch",
"tensorboard",
"bert",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
automatic-speech-recognition | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-base-timit-demo-colab
This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4475
- Wer: 0.3400
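Transcription with this checkpoint follows the same pattern as the XLSR examples earlier in this dataset (a sketch; the model id comes from this row and the audio path is a placeholder, expected as 16 kHz mono):
```python
import torch
import torchaudio
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("Theivaprakasham/wav2vec2-base-timit-demo-colab")
model = Wav2Vec2ForCTC.from_pretrained("Theivaprakasham/wav2vec2-base-timit-demo-colab")

speech, sampling_rate = torchaudio.load("sample.wav")  # hypothetical recording
if sampling_rate != 16_000:
    speech = torchaudio.transforms.Resample(sampling_rate, 16_000)(speech)

inputs = processor(speech.squeeze().numpy(), sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
print(processor.batch_decode(torch.argmax(logits, dim=-1)))
```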
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 32
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1000
- num_epochs: 30
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 3.6929 | 4.0 | 500 | 2.4485 | 1.0009 |
| 0.9441 | 8.0 | 1000 | 0.4848 | 0.4758 |
| 0.3016 | 12.0 | 1500 | 0.4464 | 0.4016 |
| 0.1715 | 16.0 | 2000 | 0.4666 | 0.3765 |
| 0.1277 | 20.0 | 2500 | 0.4340 | 0.3515 |
| 0.1082 | 24.0 | 3000 | 0.4544 | 0.3495 |
| 0.0819 | 28.0 | 3500 | 0.4475 | 0.3400 |
### Framework versions
- Transformers 4.11.3
- Pytorch 1.10.0+cu111
- Datasets 1.13.3
- Tokenizers 0.10.3
| {"license": "apache-2.0", "tags": ["generated_from_trainer"], "model-index": [{"name": "wav2vec2-base-timit-demo-colab", "results": []}]} | Theivaprakasham/wav2vec2-base-timit-demo-colab | null | [
"transformers",
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"generated_from_trainer",
"license:apache-2.0",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
null | null | {} | Theivaprakasham/wav2vec2-base-timit-demo | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text-generation | transformers |
# Stewie DialoGPT Model | {"tags": ["conversational"]} | Thejas/DialoGPT-small-Stewei | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"conversational",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
text-generation | transformers |
# Elon Musk DialoGPT Model | {"tags": ["conversational"]} | Thejas/DialoGPT-small-elon | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"conversational",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
null | null | {} | ThibaudL/test | null | [
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text-classification | transformers | {} | ThinkCERCA/claim_hugging | null | [
"transformers",
"tf",
"bert",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text-classification | transformers | {} | ThinkCERCA/counterargument_hugging | null | [
"transformers",
"tf",
"bert",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |
|
text-classification | transformers | {} | ThinkCERCA/evidence_hugging | null | [
"transformers",
"tf",
"bert",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
]
| null | 2022-03-02T23:29:05+00:00 |