pipeline_tag | library_name | text | metadata | id | last_modified | tags | sha | created_at
---|---|---|---|---|---|---|---|---
text2text-generation
|
transformers
|
## CALM
This model accompanies the ICLR 2021 paper [Pre-training Text-to-Text Transformers for Concept-centric Common Sense](https://openreview.net/forum?id=3k20LAiHYL2).
Check out our [project website](https://inklab.usc.edu/calm-project) for details!
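As the repository tags indicate a T5-style `text2text-generation` checkpoint, it should load with the standard `transformers` seq2seq classes. A minimal sketch, assuming the usual T5 span-infilling input format:
```python
from transformers import AutoTokenizer, T5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("danny911kr/calm-mix-base")
model = T5ForConditionalGeneration.from_pretrained("danny911kr/calm-mix-base")

# Encode a prompt, generate, and decode the result.
inputs = tokenizer("The chef cooked a delicious <extra_id_0>.", return_tensors="pt")
outputs = model.generate(inputs.input_ids, max_length=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```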
```bibtex
@inproceedings{CALM2021,
title={Pre-training Text-to-Text Transformers for Concept-centric Common Sense},
author={Wangchunshu Zhou and Dong-Ho Lee and Ravi Kiran Selvam and Seyeon Lee and Bill Yuchen Lin and Xiang Ren},
booktitle={ICLR},
year={2021}
}
```
|
{}
|
danny911kr/calm-mix-base
| null |
[
"transformers",
"pytorch",
"t5",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
text2text-generation
|
transformers
|
## CALM
This model accompanies the ICLR 2021 paper [Pre-training Text-to-Text Transformers for Concept-centric Common Sense](https://openreview.net/forum?id=3k20LAiHYL2).
Check out our [project website](https://inklab.usc.edu/calm-project) for details!
```bibtex
@inproceedings{CALM2021,
title={Pre-training Text-to-Text Transformers for Concept-centric Common Sense},
author={Wangchunshu Zhou and Dong-Ho Lee and Ravi Kiran Selvam and Seyeon Lee and Bill Yuchen Lin and Xiang Ren},
booktitle={ICLR},
year={2021}
}
```
|
{}
|
danny911kr/calm-mix-large
| null |
[
"transformers",
"pytorch",
"t5",
"text2text-generation",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
feature-extraction
|
transformers
|
{}
|
danny911kr/tapas_simsiam_mlm_1
| null |
[
"transformers",
"pytorch",
"tapas",
"feature-extraction",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
feature-extraction
|
transformers
|
{}
|
danny911kr/tapas_simsiam_mlm_2
| null |
[
"transformers",
"pytorch",
"tapas",
"feature-extraction",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
null | null |
{}
|
dansbecker/my-test-repo
| null |
[
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
text-generation
|
transformers
|
{}
|
danurahul/Eddie_neo_1.3train
| null |
[
"transformers",
"pytorch",
"gpt_neo",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
text-generation
|
transformers
|
{}
|
danurahul/Eddie_neo_j11
| null |
[
"transformers",
"pytorch",
"gpt_neo",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
text-generation
|
transformers
|
{}
|
danurahul/Eddie_neo_j6
| null |
[
"transformers",
"pytorch",
"gpt_neo",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
text-generation
|
transformers
|
{}
|
danurahul/RuGPT3_german20
| null |
[
"transformers",
"pytorch",
"jax",
"gpt2",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
text-generation
|
transformers
|
{}
|
danurahul/alex-gpt-L
| null |
[
"transformers",
"pytorch",
"jax",
"gpt2",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
text-generation
|
transformers
|
{}
|
danurahul/alex-gpt-doc2text
| null |
[
"transformers",
"pytorch",
"jax",
"gpt2",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
text-generation
|
transformers
|
{}
|
danurahul/alex-gpt-finetune
| null |
[
"transformers",
"pytorch",
"jax",
"gpt2",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
text-generation
|
transformers
|
{}
|
danurahul/alex-gpt2000
| null |
[
"transformers",
"pytorch",
"jax",
"gpt2",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
null | null |
{}
|
danurahul/alex-gpt3
| null |
[
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
null | null |
{}
|
danurahul/alex-gptn125
| null |
[
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
text-generation
|
transformers
|
{}
|
danurahul/alex_gpt3_Doctextfull
| null |
[
"transformers",
"pytorch",
"jax",
"safetensors",
"gpt2",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
text-generation
|
transformers
|
{}
|
danurahul/alex_gpt3_Doctextfull2
| null |
[
"transformers",
"pytorch",
"jax",
"safetensors",
"gpt2",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
text-generation
|
transformers
|
{}
|
danurahul/alex_gpt3_endoftext
| null |
[
"transformers",
"pytorch",
"jax",
"gpt2",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
text-generation
|
transformers
|
{}
|
danurahul/distil
| null |
[
"transformers",
"pytorch",
"gpt2",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
null | null |
{}
|
danurahul/distilbert-base-uncased-finetuned-cola
| null |
[
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
text-generation
|
transformers
|
{}
|
danurahul/doc2txt_model2
| null |
[
"transformers",
"pytorch",
"jax",
"gpt2",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
text-generation
|
transformers
|
{}
|
danurahul/german_gpt_4g
| null |
[
"transformers",
"pytorch",
"jax",
"safetensors",
"gpt2",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
text-generation
|
transformers
|
{}
|
danurahul/ghosh_dentist
| null |
[
"transformers",
"pytorch",
"gpt2",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
text-generation
|
transformers
|
{}
|
danurahul/ghosh_dentist_med
| null |
[
"transformers",
"pytorch",
"gpt2",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
text-generation
|
transformers
|
{}
|
danurahul/gptneo_tarot
| null |
[
"transformers",
"pytorch",
"gpt_neo",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
automatic-speech-recognition
|
transformers
|
# Wav2Vec2-Large-XLSR-53-or
Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Odia using the [Common Voice](https://huggingface.co/datasets/common_voice) dataset.
When using this model, make sure that your speech input is sampled at 16kHz.
## Usage
The model can be used directly (without a language model) as follows:
```python
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
test_dataset = load_dataset("common_voice", "or", split="test[:2%]")
processor = Wav2Vec2Processor.from_pretrained("danurahul/wav2vec2-large-xlsr-or")
model = Wav2Vec2ForCTC.from_pretrained("danurahul/wav2vec2-large-xlsr-or")
resampler = torchaudio.transforms.Resample(48_000, 16_000)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
predicted_ids = torch.argmax(logits, dim=-1)
print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
```
## Evaluation
The model can be evaluated as follows on the odia test data of Common Voice.
```python
import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re
test_dataset = load_dataset("common_voice", "or", split="test")
wer = load_metric("wer")
processor = Wav2Vec2Processor.from_pretrained("danurahul/wav2vec2-large-xlsr-or")
model = Wav2Vec2ForCTC.from_pretrained("danurahul/wav2vec2-large-xlsr-or")
model.to("cuda")
chars_to_ignore_regex = '[\\,\\?\\.\\!\\-\\;\\:\\"\\“]'
resampler = torchaudio.transforms.Resample(48_000, 16_000)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
# Run inference over the test set in batches.
def evaluate(batch):
    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
    pred_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(pred_ids)
    return batch
result = test_dataset.map(evaluate, batched=True, batch_size=8)
print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```
**Test Result**: 54.6 %
## Training
The Common Voice `train`, `validation`, and `test` splits were used for training as well as prediction and testing.
The script used for training can be found at https://github.com/rahul-art/wav2vec2_or
|
{"language": "or", "license": "apache-2.0", "tags": ["audio", "automatic-speech-recognition", "speech", "xlsr-fine-tuning-week"], "datasets": ["common_voice"], "metrics": ["wer"], "model-index": [{"name": "odia XLSR Wav2Vec2 Large 2000", "results": [{"task": {"type": "automatic-speech-recognition", "name": "Speech Recognition"}, "dataset": {"name": "Common Voice or", "type": "common_voice", "args": "or"}, "metrics": [{"type": "wer", "value": 54.6, "name": "Test WER"}]}]}]}
|
danurahul/wav2vec2-large-xlsr-or
| null |
[
"transformers",
"pytorch",
"jax",
"wav2vec2",
"automatic-speech-recognition",
"audio",
"speech",
"xlsr-fine-tuning-week",
"or",
"dataset:common_voice",
"license:apache-2.0",
"model-index",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
automatic-speech-recognition
|
transformers
|
# Wav2Vec2-Large-XLSR-53-Punjabi
Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Punjabi using the [Common Voice](https://huggingface.co/datasets/common_voice) dataset.
When using this model, make sure that your speech input is sampled at 16kHz.
## Usage
The model can be used directly (without a language model) as follows:
```python
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
test_dataset = load_dataset("common_voice", "pa-IN", split="test[:2%]")
processor = Wav2Vec2Processor.from_pretrained("danurahul/wav2vec2-large-xlsr-pa-IN")
model = Wav2Vec2ForCTC.from_pretrained("danurahul/wav2vec2-large-xlsr-pa-IN")
resampler = torchaudio.transforms.Resample(48_000, 16_000)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
predicted_ids = torch.argmax(logits, dim=-1)
print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
```
## Evaluation
The model can be evaluated as follows on the Punjabi test data of Common Voice.
```python
import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re
test_dataset = load_dataset("common_voice", "pa-IN", split="test")
wer = load_metric("wer")
processor = Wav2Vec2Processor.from_pretrained("danurahul/wav2vec2-large-xlsr-pa-IN")
model = Wav2Vec2ForCTC.from_pretrained("danurahul/wav2vec2-large-xlsr-pa-IN")
model.to("cuda")
chars_to_ignore_regex = '[\\,\\?\\.\\!\\-\\;\\:\\"\\“\\%\\‘\\”\\�]'
resampler = torchaudio.transforms.Resample(48_000, 16_000)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
# Run inference over the test set in batches.
def evaluate(batch):
    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
    pred_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(pred_ids)
    return batch
result = test_dataset.map(evaluate, batched=True, batch_size=8)
print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```
**Test Result**: 100 %
## Training
The Common Voice `train` and `validation` splits were used for training as well as validation and testing.
The script used for training can be found https://github.com/rahul-art/huggingface_wav2vec2_punjabi/blob/main/Fine_Tune_XLSR_Wav2Vec2_on_Punjabi_ASR_with_%F0%9F%A4%97_Transformers.ipynb
|
{"language": "pa-IN", "license": "apache-2.0", "tags": ["audio", "automatic-speech-recognition", "speech", "xlsr-fine-tuning-week"], "datasets": ["common_voice"], "metrics": ["wer"], "model-index": [{"name": "danurahul/wav2vec2-large-xlsr-pa-IN", "results": [{"task": {"type": "automatic-speech-recognition", "name": "Speech Recognition"}, "dataset": {"name": "Common Voice pa-IN", "type": "common_voice", "args": "pa-IN"}, "metrics": [{"type": "wer", "value": 54.86, "name": "Test WER"}]}]}]}
|
danurahul/wav2vec2-large-xlsr-pa-IN
| null |
[
"transformers",
"pytorch",
"jax",
"wav2vec2",
"automatic-speech-recognition",
"audio",
"speech",
"xlsr-fine-tuning-week",
"dataset:common_voice",
"license:apache-2.0",
"model-index",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
text-generation
|
transformers
|
{}
|
danurahul/yoav_gpt_neo1.3B
| null |
[
"transformers",
"pytorch",
"gpt_neo",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
text-generation
|
transformers
|
{}
|
danurahul/yoav_gpt_neo1.3B_delimiter
| null |
[
"transformers",
"pytorch",
"gpt_neo",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
text-generation
|
transformers
|
{}
|
danurahul/yoav_neo_spaces
| null |
[
"transformers",
"pytorch",
"gpt_neo",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
text-classification
|
transformers
|
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# xlm-roberta-base-finetuned-marc-en
This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the amazon_reviews_multi dataset.
It achieves the following results on the evaluation set:
- Loss: 0.9302
- Mae: 0.5
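Given the `xlm-roberta` and `text-classification` tags, the model should run with the standard `transformers` pipeline. A minimal inference sketch (the review text is illustrative; the card does not document the label scheme):
```python
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="danwilbury/xlm-roberta-base-finetuned-marc-en",
)

# Predict a rating label for a product review.
print(classifier("This product broke after two days. Very disappointed."))
```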
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2
### Training results
| Training Loss | Epoch | Step | Validation Loss | Mae |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 1.1253 | 1.0 | 235 | 0.9756 | 0.5488 |
| 0.9465 | 2.0 | 470 | 0.9302 | 0.5 |
### Framework versions
- Transformers 4.11.3
- Pytorch 1.9.0+cu111
- Datasets 1.14.0
- Tokenizers 0.10.3
|
{"license": "mit", "tags": ["generated_from_trainer"], "datasets": ["amazon_reviews_multi"], "model-index": [{"name": "xlm-roberta-base-finetuned-marc-en", "results": []}]}
|
danwilbury/xlm-roberta-base-finetuned-marc-en
| null |
[
"transformers",
"pytorch",
"tensorboard",
"xlm-roberta",
"text-classification",
"generated_from_trainer",
"dataset:amazon_reviews_multi",
"license:mit",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
text-generation
|
transformers
|
Sample usage:
```python
from transformers import GPT2Tokenizer, GPT2LMHeadModel

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("danyaljj/gpt2_question_answering_squad2")
input_ids = tokenizer.encode("There are two apples on the counter. Q: How many apples? A:", return_tensors="pt")
outputs = model.generate(input_ids)
print("Generated:", tokenizer.decode(outputs[0], skip_special_tokens=True))
```
Which should produce this:
```
Generated: There are two apples on the counter. Q: How many apples? A: two
```
|
{}
|
danyaljj/gpt2_question_answering_squad2
| null |
[
"transformers",
"pytorch",
"gpt2",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
text-generation
|
transformers
|
Sample usage:
```python
from transformers import GPT2Tokenizer, GPT2LMHeadModel

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("danyaljj/gpt2_question_generation_given_paragraph")
input_ids = tokenizer.encode("There are two apples on the counter. Q:", return_tensors="pt")
outputs = model.generate(input_ids)
print("Generated:", tokenizer.decode(outputs[0], skip_special_tokens=True))
```
Which should produce this:
```
Generated: There are two apples on the counter. Q: What is the name of the counter that is on
```
|
{}
|
danyaljj/gpt2_question_generation_given_paragraph
| null |
[
"transformers",
"pytorch",
"gpt2",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
text-generation
|
transformers
|
Sample usage:
```python
from transformers import GPT2Tokenizer, GPT2LMHeadModel

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("danyaljj/gpt2_question_generation_given_paragraph_answer")
input_ids = tokenizer.encode("There are two apples on the counter. A: apples Q:", return_tensors="pt")
outputs = model.generate(input_ids)
print("Generated:", tokenizer.decode(outputs[0], skip_special_tokens=True))
```
Which should produce this:
```
Generated: There are two apples on the counter. A: apples Q: What is the name of the counter
```
|
{}
|
danyaljj/gpt2_question_generation_given_paragraph_answer
| null |
[
"transformers",
"pytorch",
"gpt2",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
null |
transformers
|
West et al.'s model from their "reflective decoding" paper.
Sample usage:
```python
import torch
from modeling_opengpt2 import OpenGPT2LMHeadModel
from padded_encoder import Encoder
path_to_backward = 'danyaljj/opengpt2_pytorch_backward'
encoder = Encoder()
model_backward = OpenGPT2LMHeadModel.from_pretrained(path_to_backward)
input = "until she finally won."
input_ids = encoder.encode(input)
input_ids = torch.tensor([input_ids[::-1]], dtype=torch.int)
print(input_ids)
output = model_backward.generate(input_ids)
output_text = encoder.decode(output.tolist()[0][::-1])
print(output_text)
```
Download the additional files from here: https://github.com/peterwestuw/GPT2ForwardBackward
|
{}
|
danyaljj/opengpt2_pytorch_backward
| null |
[
"transformers",
"pytorch",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
null |
transformers
|
West et al.'s model from their "reflective decoding" paper.
Sample usage:
```python
import torch
from modeling_opengpt2 import OpenGPT2LMHeadModel
from padded_encoder import Encoder
path_to_forward = 'danyaljj/opengpt2_pytorch_forward'
encoder = Encoder()
model_forward = OpenGPT2LMHeadModel.from_pretrained(path_to_forward)
input = "She tried to win but"
input_ids = encoder.encode(input)
input_ids = torch.tensor([input_ids], dtype=torch.int)
print(input_ids)
output = model_forward.generate(input_ids)
output_text = encoder.decode(output.tolist()[0])
print(output_text)
```
Download the additional files from here: https://github.com/peterwestuw/GPT2ForwardBackward
|
{}
|
danyaljj/opengpt2_pytorch_forward
| null |
[
"transformers",
"pytorch",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
text-generation
|
transformers
|
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilgpt2-finetuned-wikitext2
This model is a fine-tuned version of [distilgpt2](https://huggingface.co/distilgpt2) on the None dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training (see the `TrainingArguments` sketch after this list):
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0
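A minimal sketch of these settings expressed as `transformers.TrainingArguments` (the output directory is a placeholder; the Adam betas and epsilon listed above are the library defaults):
```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="distilgpt2-finetuned-wikitext2",  # placeholder
    learning_rate=2e-5,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=3.0,
)
```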
### Framework versions
- Transformers 4.12.3
- Pytorch 1.10.0+cu111
- Datasets 1.15.1
- Tokenizers 0.10.3
|
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "model-index": [{"name": "distilgpt2-finetuned-wikitext2", "results": []}]}
|
daqiao202/distilgpt2-finetuned-wikitext2
| null |
[
"transformers",
"pytorch",
"tensorboard",
"gpt2",
"text-generation",
"generated_from_trainer",
"license:apache-2.0",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
null | null |
{}
|
daquarti/umita
| null |
[
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
null | null |
{}
|
dark-knight/output_dir_radiology_data
| null |
[
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
automatic-speech-recognition
|
transformers
|
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-base-timit-demo-colab
This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the None dataset.
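Given the `wav2vec2` and `automatic-speech-recognition` tags, a minimal inference sketch with the `transformers` pipeline (the audio path is a placeholder; input should be sampled at 16 kHz):
```python
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="dark-knight/wav2vec2-base-timit-demo-colab",
)

# Transcribe a 16 kHz WAV file (placeholder path).
print(asr("sample.wav"))
```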
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 32
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1000
- num_epochs: 2
- mixed_precision_training: Native AMP
### Training results
### Framework versions
- Transformers 4.11.3
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.10.3
|
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "model-index": [{"name": "wav2vec2-base-timit-demo-colab", "results": []}]}
|
dark-knight/wav2vec2-base-timit-demo-colab
| null |
[
"transformers",
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"generated_from_trainer",
"license:apache-2.0",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
null | null |
{}
|
darknesses/crowd-counting
| null |
[
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
text-classification
|
transformers
|
{}
|
darkzara/results
| null |
[
"transformers",
"pytorch",
"tensorboard",
"bert",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
text-generation
|
transformers
|
# Chicken Bot's Jon Snow DialoGPT Model
|
{"tags": ["conversational"]}
|
darkzek/chickenbot-jon-snow
| null |
[
"transformers",
"pytorch",
"gpt2",
"text-generation",
"conversational",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
text-generation
|
transformers
|
# Pickle Rick DialoGPT Model
|
{"tags": ["conversational"]}
|
darthboii/DialoGPT-small-PickleRick
| null |
[
"transformers",
"pytorch",
"gpt2",
"text-generation",
"conversational",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
text-generation
|
transformers
|
# Rick DialoGPT Model
|
{"tags": ["conversational"]}
|
darthboii/DialoGPT-small-Rick
| null |
[
"transformers",
"pytorch",
"gpt2",
"text-generation",
"conversational",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
null |
transformers
|
Hi
|
{}
|
darubramha/hi-LyricsGPT2
| null |
[
"transformers",
"pytorch",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
null | null |
{}
|
dasdk350/Deepak
| null |
[
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
null | null |
{}
|
dash/dgs
| null |
[
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
null |
transformers
|
https://github.com/monologg/JointBERT
|
{}
|
databuzzword/JointBERT-atis
| null |
[
"transformers",
"pytorch",
"bert",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
null |
transformers
|
https://github.com/monologg/JointBERT
|
{}
|
databuzzword/JointBERT-snips
| null |
[
"transformers",
"pytorch",
"bert",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
null | null |
{}
|
databuzzword/aliostad-programming-language-detection
| null |
[
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
null | null |
{}
|
databuzzword/bringing-old-photos-back-to-life
| null |
[
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
null | null |
{}
|
databuzzword/deoldify-artistic
| null |
[
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
null | null |
{}
|
databuzzword/deoldify-stable
| null |
[
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
null | null |
{}
|
databuzzword/esrgan
| null |
[
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
null | null |
{}
|
databuzzword/mobile-net
| null |
[
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
null | null |
{}
|
databuzzword/xception
| null |
[
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
null | null |
{}
|
datashadi/wav2vec2-large-xls-r-300m-fa-colab
| null |
[
"tensorboard",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
feature-extraction
|
transformers
|
{}
|
datawhales/korean-relation-extraction
| null |
[
"transformers",
"pytorch",
"bert",
"feature-extraction",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
text-to-speech
|
tensorflowtts
|
# Tacotron 2 with Guided Attention trained on Synpaflex (Fr)
This repository provides a pretrained [Tacotron2](https://arxiv.org/abs/1712.05884) trained with [Guided Attention](https://arxiv.org/abs/1710.08969) on the Synpaflex dataset (Fr). For details of the model, we encourage you to read more about
[TensorFlowTTS](https://github.com/TensorSpeech/TensorFlowTTS).
## Install TensorFlowTTS
First of all, please install TensorFlowTTS with the following command:
```
pip install TensorFlowTTS
```
### Converting your Text to Mel Spectrogram
```python
import numpy as np
import soundfile as sf
import yaml
import tensorflow as tf
from tensorflow_tts.inference import AutoProcessor
from tensorflow_tts.inference import TFAutoModel
processor = AutoProcessor.from_pretrained("tensorspeech/tts-tacotron2-synpaflex-fr")
tacotron2 = TFAutoModel.from_pretrained("tensorspeech/tts-tacotron2-synpaflex-fr")
text = "Oh, je voudrais tant que tu te souviennes Des jours heureux quand nous étions amis"
input_ids = processor.text_to_sequence(text)
decoder_output, mel_outputs, stop_token_prediction, alignment_history = tacotron2.inference(
input_ids=tf.expand_dims(tf.convert_to_tensor(input_ids, dtype=tf.int32), 0),
input_lengths=tf.convert_to_tensor([len(input_ids)], tf.int32),
speaker_ids=tf.convert_to_tensor([0], dtype=tf.int32),
)
```
#### Referencing Tacotron 2
```
@article{DBLP:journals/corr/abs-1712-05884,
author = {Jonathan Shen and
Ruoming Pang and
Ron J. Weiss and
Mike Schuster and
Navdeep Jaitly and
Zongheng Yang and
Zhifeng Chen and
Yu Zhang and
Yuxuan Wang and
R. J. Skerry{-}Ryan and
Rif A. Saurous and
Yannis Agiomyrgiannakis and
Yonghui Wu},
title = {Natural {TTS} Synthesis by Conditioning WaveNet on Mel Spectrogram
Predictions},
journal = {CoRR},
volume = {abs/1712.05884},
year = {2017},
url = {http://arxiv.org/abs/1712.05884},
archivePrefix = {arXiv},
eprint = {1712.05884},
timestamp = {Thu, 28 Nov 2019 08:59:52 +0100},
biburl = {https://dblp.org/rec/journals/corr/abs-1712-05884.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
```
#### Referencing TensorFlowTTS
```
@misc{TFTTS,
author = {Minh Nguyen, Alejandro Miguel Velasquez, Erogol, Kuan Chen, Dawid Kobus, Takuya Ebata,
Trinh Le and Yunchao He},
title = {TensorflowTTS},
year = {2020},
publisher = {GitHub},
journal = {GitHub repository},
  howpublished = {\url{https://github.com/TensorSpeech/TensorFlowTTS}},
}
```
|
{"language": "fr", "license": "apache-2.0", "tags": ["tensorflowtts", "audio", "text-to-speech", "text-to-mel"], "datasets": ["synpaflex"], "widget": [{"text": "Oh, je voudrais tant que tu te souviennes Des jours heureux quand nous \u00e9tions amis"}]}
|
dathudeptrai/tts-tacotron2-synpaflex-fr
| null |
[
"tensorflowtts",
"audio",
"text-to-speech",
"text-to-mel",
"fr",
"dataset:synpaflex",
"arxiv:1712.05884",
"arxiv:1710.08969",
"license:apache-2.0",
"has_space",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
text-generation
|
transformers
|
La descripción en Español se encuentra después de la descripción en Inglés.
# (English) GPT2-small-spanish: a Language Model for Spanish text generation (and more NLP tasks...)
GPT2-small-spanish is a state-of-the-art language model for Spanish based on the GPT-2 small model.
It was trained on Spanish Wikipedia using **Transfer Learning and Fine-tuning techniques**. The training took around 70 hours with four NVIDIA GTX 1080-Ti GPUs with 11GB of DDR5 and around 3GB of (processed) training data.
It was fine-tuned from the [English pre-trained GPT-2 small](https://huggingface.co/gpt2) using the Hugging Face libraries (Transformers and Tokenizers) wrapped into the [fastai v2](https://dev.fast.ai/) Deep Learning framework. All the fine-tuning fastai v2 techniques were used.
The training is purely based on the [GPorTuguese-2](https://huggingface.co/pierreguillou/gpt2-small-portuguese) model developed by Pierre Guillou. The training details are in this article: "[Faster than training from scratch — Fine-tuning the English GPT-2 in any language with Hugging Face and fastai v2 (practical case with Portuguese)](https://medium.com/@pierre_guillou/faster-than-training-from-scratch-fine-tuning-the-english-gpt-2-in-any-language-with-hugging-f2ec05c98787)".
This preliminary version is now available on Hugging Face.
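A minimal generation sketch with the `transformers` pipeline (the prompt mirrors the widget example in the card metadata; generation settings are illustrative):
```python
from transformers import pipeline

generator = pipeline("text-generation", model="datificate/gpt2-small-spanish")

# Generate a short Spanish continuation.
print(generator(
    "La inteligencia artificial en Latinoamérica se ha desarrollado ",
    max_length=40,
    num_return_sequences=1,
))
```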
## Limitations and bias
(Copied from the original GPorTuguese-2 model.) The training data used for this model comes from Spanish Wikipedia. We know it contains a lot of unfiltered content from the internet, which is far from neutral. As the OpenAI team themselves point out in their model card:
> Because large-scale language models like GPT-2 do not distinguish fact from fiction, we don’t support use-cases that require the generated text to be true. Additionally, language models like GPT-2 reflect the biases inherent to the systems they were trained on, so we do not recommend that they be deployed into systems that interact with humans unless the deployers first carry out a study of biases relevant to the intended use-case. We found no statistically significant difference in gender, race, and religious bias probes between 774M and 1.5B, implying all versions of GPT-2 should be approached with similar levels of caution around use cases that are sensitive to biases around human attributes.
## Authors
The model was trained and evaluated by [Josué Obregon](https://www.linkedin.com/in/josue-obregon/) and [Berny Carrera](https://www.linkedin.com/in/bernycarrera/), founders of [Datificate](https://datificate.com), a space for learning Machine Learning in Spanish.
The training was possible thanks to the computing power of several GPUs (GPU NVIDIA GTX1080-Ti) of the [IAI Lab](http://iai.khu.ac.kr/) (Kyung Hee University) from which Josué is attached as a Postdoctoral Researcher in Industrial Artificial Intelligence.
As stated before, this work is mainly based in the work of [Pierre GUILLOU](https://www.linkedin.com/in/pierreguillou/).
# (Español) GPT2-small-spanish: un modelo de lenguaje para generación de texto en Español (y algunas otras tareas de NLP...)
GPT2-small-spanish es un modelo de lenguaje de vanguardia en Español basado en el modelo pequeño GPT-2.
Fue entrenado con la Wikipedia en Español usando **técnicas de Aprendizaje por Transferencia y afinación de modelos**. El entrenamiento del modelo tomó alrededor de 70 horas con cuatro GPUs NVIDIA GTX 1080-Ti con 11GB de DDR5 y con aproximadamente 3GB de datos de entrenamiento preprocesados.
Fue afinado del modelo en Inglés [English pre-trained GPT-2 small](https://huggingface.co/gpt2) utilizando las librerías de Hugging Face (Transformers y Tokenizers) integradas con el framework de Deep Learning [fastai v2](https://dev.fast.ai/). Se usaron técnicas de afinamiento fino de fastai v2.
El entrenamiento está enteramente basado en el modelo en Portugués [GPorTuguese-2](https://huggingface.co/pierreguillou/gpt2-small-portuguese) desarrollado por Pierre Guillou. Los detalles del entrenamiento se encuentran en este artículo: "[Faster than training from scratch — Fine-tuning the English GPT-2 in any language with Hugging Face and fastai v2 (practical case with Portuguese)](https://medium.com/@pierre_guillou/faster-than-training-from-scratch-fine-tuning-the-english-gpt-2-in-any-language-with-hugging-f2ec05c98787)".
La versión preliminar del modelo se encuentra en Hugging Face.
## Limitaciones y sesgos
(Copiado del modelo original GPorTuguese-2.) Los datos de entrenamiento provienen de la Wikipedia en Español. Se sabe que contiene bastante contenido no filtrado del internet, lo cual está lejos de ser neutral. Esto es señalado por el equipo desarrollador de OpenAI en su propia tarjeta de modelo:
> Because large-scale language models like GPT-2 do not distinguish fact from fiction, we don’t support use-cases that require the generated text to be true. Additionally, language models like GPT-2 reflect the biases inherent to the systems they were trained on, so we do not recommend that they be deployed into systems that interact with humans unless the deployers first carry out a study of biases relevant to the intended use-case. We found no statistically significant difference in gender, race, and religious bias probes between 774M and 1.5B, implying all versions of GPT-2 should be approached with similar levels of caution around use cases that are sensitive to biases around human attributes.
## Autores
El modelo fue entrenado y evaluado por [Josué Obregon](https://www.linkedin.com/in/josue-obregon/) y [Berny Carrera](https://www.linkedin.com/in/bernycarrera/), fundadores de [Datificate](https://datificate.com), un espacio para aprender Machine Learning en Español.
El entrenamiento fue posible gracias al poder computacional de varias GPUs (GPU NVIDIA GTX1080-Ti) del Laboratorio de Inteligencia Artificial Industrial [IAI Lab](http://iai.khu.ac.kr/) (Universidad de Kyung Hee) al cual Josué pertenece como investigador postdoctoral en Inteligencia Artificial Industrial.
Como fue mencionado anteriormente, este trabajo está basado en el trabajo de [Pierre GUILLOU](https://www.linkedin.com/in/pierreguillou/).
|
{"language": "es", "license": "apache-2.0", "datasets": ["wikipedia"], "widget": [{"text": "La inteligencia artificial en lationoam\u00e9rica se ha desarrollado "}]}
|
datificate/gpt2-small-spanish
| null |
[
"transformers",
"pytorch",
"tf",
"jax",
"gpt2",
"text-generation",
"es",
"dataset:wikipedia",
"license:apache-2.0",
"autotrain_compatible",
"endpoints_compatible",
"has_space",
"text-generation-inference",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
fill-mask
|
transformers
|
# <a name="introduction"></a> PhoBERT: Pre-trained language models for Vietnamese
Pre-trained PhoBERT models are the state-of-the-art language models for Vietnamese ([Pho](https://en.wikipedia.org/wiki/Pho), i.e. "Phở", is a popular food in Vietnam):
- Two PhoBERT versions of "base" and "large" are the first public large-scale monolingual language models pre-trained for Vietnamese. PhoBERT pre-training approach is based on [RoBERTa](https://github.com/pytorch/fairseq/blob/master/examples/roberta/README.md) which optimizes the [BERT](https://github.com/google-research/bert) pre-training procedure for more robust performance.
- PhoBERT outperforms previous monolingual and multilingual approaches, obtaining new state-of-the-art performances on four downstream Vietnamese NLP tasks of Part-of-speech tagging, Dependency parsing, Named-entity recognition and Natural language inference.
The general architecture and experimental results of PhoBERT can be found in our EMNLP-2020 Findings [paper](https://arxiv.org/abs/2003.00744):
@article{phobert,
title = {{PhoBERT: Pre-trained language models for Vietnamese}},
author = {Dat Quoc Nguyen and Anh Tuan Nguyen},
journal = {Findings of EMNLP},
year = {2020}
}
**Please CITE** our paper when PhoBERT is used to help produce published results or is incorporated into other software.
For further information or requests, please go to [PhoBERT's homepage](https://github.com/VinAIResearch/PhoBERT)!
### Installation <a name="install2"></a>
- Python 3.6+, and PyTorch 1.1.0+ (or TensorFlow 2.0+)
- Install `transformers`:
- `git clone https://github.com/huggingface/transformers.git`
- `cd transformers`
- `pip3 install --upgrade .`
### Pre-trained models <a name="models2"></a>
Model | #params | Arch. | Pre-training data
---|---|---|---
`vinai/phobert-base` | 135M | base | 20GB of texts
`vinai/phobert-large` | 370M | large | 20GB of texts
### Example usage <a name="usage2"></a>
```python
import torch
from transformers import AutoModel, AutoTokenizer
phobert = AutoModel.from_pretrained("vinai/phobert-base")
tokenizer = AutoTokenizer.from_pretrained("vinai/phobert-base")
# INPUT TEXT MUST BE ALREADY WORD-SEGMENTED!
line = "Tôi là sinh_viên trường đại_học Công_nghệ ."
input_ids = torch.tensor([tokenizer.encode(line)])
with torch.no_grad():
    features = phobert(input_ids)  # Models outputs are now tuples
## With TensorFlow 2.0+:
# from transformers import TFAutoModel
# phobert = TFAutoModel.from_pretrained("vinai/phobert-base")
```
|
{}
|
datnth1709/Phobert-classifier
| null |
[
"transformers",
"pytorch",
"tf",
"jax",
"roberta",
"fill-mask",
"arxiv:2003.00744",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
null | null |
{}
|
datoad4510/nn-classifier-test
| null |
[
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
text-generation
|
transformers
|
# Harry Potter DialoGPT Model
|
{"tags": ["conversational"]}
|
dats/DialoGPT-small-harrypotter
| null |
[
"transformers",
"pytorch",
"gpt2",
"text-generation",
"conversational",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
text-generation
|
transformers
|
# Tony Stark DialoGPT model
Invite me to your Discord server: https://discord.com/api/oauth2/authorize?client_id=885065886787063848&permissions=137439365184&scope=bot
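A minimal single-turn chat sketch in the usual DialoGPT style (the prompt and generation settings are illustrative):
```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("dattam/DialoGPT-medium-TonyStarkBot")
model = AutoModelForCausalLM.from_pretrained("dattam/DialoGPT-medium-TonyStarkBot")

# Encode the user message plus the end-of-sequence token, then generate a reply.
input_ids = tokenizer.encode("Hello, who are you?" + tokenizer.eos_token, return_tensors="pt")
reply_ids = model.generate(input_ids, max_length=100, pad_token_id=tokenizer.eos_token_id)
print(tokenizer.decode(reply_ids[0, input_ids.shape[-1]:], skip_special_tokens=True))
```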
|
{"tags": ["conversational"]}
|
dattam/DialoGPT-medium-TonyStarkBot
| null |
[
"transformers",
"pytorch",
"gpt2",
"text-generation",
"conversational",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
token-classification
|
transformers
|
BioBERT model fine-tuned on NER with the BC5CDR-diseases and NCBI-diseases corpora, along with selected PubTator annotations from the LitCOVID dataset.
It was fine-tuned for use in the datummd/bionlp system, which is available at: https://github.com/datummd/bionlp
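A minimal sketch of running the model as a token-classification pipeline (the aggregation strategy and example sentence are illustrative):
```python
from transformers import pipeline

ner = pipeline(
    "token-classification",
    model="datummd/NCBI_BC5CDR_disease",
    aggregation_strategy="simple",  # merge sub-word pieces into entity spans
)

print(ner("The patient was diagnosed with type 2 diabetes and hypertension."))
```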
|
{"language": ["en"], "license": "apache-2.0", "tags": ["BioBERT", "Diseases", "NER"], "datasets": ["ncbi_disease", "BC5CDR-diseases", "LitCOVID-pubtator"]}
|
datummd/NCBI_BC5CDR_disease
| null |
[
"transformers",
"pytorch",
"bert",
"token-classification",
"BioBERT",
"Diseases",
"NER",
"en",
"dataset:ncbi_disease",
"dataset:BC5CDR-diseases",
"dataset:LitCOVID-pubtator",
"license:apache-2.0",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
null | null |
{}
|
davanstrien/albert_1700_tokenizer
| null |
[
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
text-classification
|
fastai
|
## Model description
This model is intended to predict, from the title of a book, whether it is 'fiction' or 'non-fiction'.
This model was trained on data created from the Digitised printed books (18th-19th Century) book collection. The datasets in this collection are derived from 49,455 digitised books (65,227 volumes), mainly from the 19th Century. The collection is dominated by English-language books and includes books in several other languages in much smaller numbers.
This model was originally developed for use as part of the Living with Machines project to 'segment' this large dataset of books into different categories based on a crude classification of genre, i.e. whether the title was `fiction` or `non-fiction`.
The model's training data (discussed more below) primarily consists of 19th Century book titles from the British Library Digitised printed books (18th-19th century) collection. These books have been catalogued according to British Library cataloguing practices. The model is likely to perform worse on book titles from earlier or later periods. While the model is multilingual, non-English book titles appear much less frequently in its training data.
## How to use
To use this within fastai, first [install](https://docs.fast.ai/#Installing) version 2 of the fastai library. You can load directly from the Hugging Face hub using the [`huggingface_hub`](https://github.com/huggingface/huggingface_hub) library.
```python
from fastai.learner import load_learner
from huggingface_hub import hf_hub_download
learn = load_learner(
hf_hub_download('davanstrien/bl-books-genre-fastai', filename="model.pkl")
)
learn.predict("Oliver Twist")
```
## Limitations and bias
The model was developed based on data from the British Library's Digitised printed books (18th-19th Century) collection. This dataset is not representative of books from the period covered: it is biased towards certain types (e.g. travel) and likely omits books that were difficult to digitise.
The formatting of British Library book titles may differ from that of other collections, which can result in worse performance on other collections. It is recommended to evaluate the model's performance before applying it to your own data. This model is unlikely to perform well on contemporary book titles without further fine-tuning.
## Training data
The training data was created using the Zooniverse platform. British Library cataloguers carried out the majority of the annotations used as training data. More information on the process of creating the training data will be available soon.
### Training procedure
Model training was carried out using the fastai library version 2.5.2.
The notebook used for training the model is available at: https://github.com/Living-with-machines/genre-classification
## Eval result
The model was evaluated on a held-out test set:
```
precision recall f1-score support
Fiction 0.91 0.88 0.90 296
Non-fiction 0.94 0.95 0.95 554
accuracy 0.93 850
macro avg 0.93 0.92 0.92 850
weighted avg 0.93 0.93 0.93 850
```
|
{"library_name": "fastai", "tags": ["text-classification", "fastai"], "datasets": ["blbooksgenre"], "widget": [{"text": "Poems on various subjects. Whereto is prefixed a short essay on the structure of English verse"}, {"text": "Two Centuries of Soho: its institutions, firms, and amusements. By the Clergy of St. Anne's, Soho, J. H. Cardwell ... H. B. Freeman ... G. C. Wilton ... assisted by other contributors, etc"}, {"text": "The Adventures of Oliver Twist. [With plates.]"}]}
|
TheBritishLibrary/bl-books-genre-fastai
| null |
[
"fastai",
"text-classification",
"dataset:blbooksgenre",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
null | null |
{}
|
davanstrien/blbooks-bad-ocr-tokenizer
| null |
[
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
null | null |
{}
|
davanstrien/blbooks-good-ocr-tokenizer
| null |
[
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
null |
adapter-transformers
|
# Adapter `davanstrien/book-genre-classification` for bert-base-cased
An [adapter](https://adapterhub.ml) for the `bert-base-cased` model that was trained on the [text-classification](https://adapterhub.ml/explore/text-classification/) dataset and includes a prediction head for classification.
This adapter was created for usage with the **[adapter-transformers](https://github.com/Adapter-Hub/adapter-transformers)** library.
## Usage
First, install `adapter-transformers`:
```
pip install -U adapter-transformers
```
_Note: adapter-transformers is a fork of transformers that acts as a drop-in replacement with adapter support. [More](https://docs.adapterhub.ml/installation.html)_
Now, the adapter can be loaded and activated like this:
```python
from transformers import AutoModelWithHeads
model = AutoModelWithHeads.from_pretrained("bert-base-cased")
adapter_name = model.load_adapter("davanstrien/book-genre-classification", source="hf", set_active=True)
```
## Architecture & Training
<!-- Add some description here -->
## Evaluation results
<!-- Add some description here -->
## Citation
<!-- Add some description here -->
|
{"tags": ["bert", "adapterhub:text-classification", "adapter-transformers"]}
|
davanstrien/book-genre-classification
| null |
[
"adapter-transformers",
"bert",
"adapterhub:text-classification",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
image-classification
|
transformers
|
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# convnext_flyswot
This model is a fine-tuned version of [facebook/convnext-base-224-22k](https://huggingface.co/facebook/convnext-base-224-22k) on the image_folder dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1441
- F1: 0.9592
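A minimal inference sketch with the `transformers` image-classification pipeline (the image path is a placeholder):
```python
from transformers import pipeline

classifier = pipeline("image-classification", model="davanstrien/convnext_flyswot")

# Classify a local image file (placeholder path).
print(classifier("page.jpg"))
```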
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 666
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 30
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| No log | 1.0 | 52 | 0.6833 | 0.7484 |
| No log | 2.0 | 104 | 0.3666 | 0.8750 |
| No log | 3.0 | 156 | 0.2090 | 0.9321 |
| No log | 4.0 | 208 | 0.1478 | 0.9449 |
| No log | 5.0 | 260 | 0.1002 | 0.9518 |
| No log | 6.0 | 312 | 0.1053 | 0.9506 |
| No log | 7.0 | 364 | 0.1182 | 0.9616 |
| No log | 8.0 | 416 | 0.1102 | 0.9592 |
| No log | 9.0 | 468 | 0.1262 | 0.9616 |
| 0.203 | 10.0 | 520 | 0.1286 | 0.9616 |
| 0.203 | 11.0 | 572 | 0.1355 | 0.9592 |
| 0.203 | 12.0 | 624 | 0.1299 | 0.9592 |
| 0.203 | 13.0 | 676 | 0.1154 | 0.9592 |
| 0.203 | 14.0 | 728 | 0.1385 | 0.9580 |
| 0.203 | 15.0 | 780 | 0.1330 | 0.9592 |
| 0.203 | 16.0 | 832 | 0.1390 | 0.9592 |
| 0.203 | 17.0 | 884 | 0.1386 | 0.9592 |
| 0.203 | 18.0 | 936 | 0.1390 | 0.9592 |
| 0.203 | 19.0 | 988 | 0.1409 | 0.9592 |
| 0.0006 | 20.0 | 1040 | 0.1411 | 0.9592 |
| 0.0006 | 21.0 | 1092 | 0.1413 | 0.9592 |
| 0.0006 | 22.0 | 1144 | 0.1415 | 0.9592 |
| 0.0006 | 23.0 | 1196 | 0.1426 | 0.9592 |
| 0.0006 | 24.0 | 1248 | 0.1435 | 0.9592 |
| 0.0006 | 25.0 | 1300 | 0.1438 | 0.9592 |
| 0.0006 | 26.0 | 1352 | 0.1434 | 0.9592 |
| 0.0006 | 27.0 | 1404 | 0.1437 | 0.9592 |
| 0.0006 | 28.0 | 1456 | 0.1441 | 0.9592 |
| 0.0002 | 29.0 | 1508 | 0.1440 | 0.9592 |
| 0.0002 | 30.0 | 1560 | 0.1441 | 0.9592 |
### Framework versions
- Transformers 4.17.0.dev0
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.11.6
|
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "datasets": ["image_folder"], "metrics": ["f1"], "base_model": "facebook/convnext-base-224-22k", "model-index": [{"name": "convnext_flyswot", "results": [{"task": {"type": "image-classification", "name": "Image Classification"}, "dataset": {"name": "image_folder", "type": "image_folder", "args": "default"}, "metrics": [{"type": "f1", "value": 0.959245529738118, "name": "F1"}]}]}]}
|
davanstrien/convnext_flyswot
| null |
[
"transformers",
"pytorch",
"safetensors",
"convnext",
"image-classification",
"generated_from_trainer",
"dataset:image_folder",
"base_model:facebook/convnext-base-224-22k",
"license:apache-2.0",
"model-index",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
image-classification
|
transformers
|
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# convnext_manuscript_iiif
This model is a fine-tuned version of [facebook/convnext-base-224-22k](https://huggingface.co/facebook/convnext-base-224-22k) on the davanstrien/iiif_manuscripts_label_ge_50 dataset.
It achieves the following results on the evaluation set:
- Loss: 5.5856
- F1: 0.0037
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0002
- train_batch_size: 64
- eval_batch_size: 64
- seed: 1337
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 30.0
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:-----:|:---------------:|:------:|
| 6.5753 | 1.0 | 2038 | 6.4121 | 0.0016 |
| 5.9865 | 2.0 | 4076 | 5.9466 | 0.0021 |
| 5.6521 | 3.0 | 6114 | 5.7645 | 0.0029 |
| 5.3123 | 4.0 | 8152 | 5.6890 | 0.0033 |
| 5.0337 | 5.0 | 10190 | 5.6692 | 0.0034 |
| 4.743 | 6.0 | 12228 | 5.5856 | 0.0037 |
| 4.4387 | 7.0 | 14266 | 5.5969 | 0.0042 |
| 4.1422 | 8.0 | 16304 | 5.6711 | 0.0043 |
| 3.8372 | 9.0 | 18342 | 5.6761 | 0.0044 |
| 3.5244 | 10.0 | 20380 | 5.8469 | 0.0042 |
| 3.2321 | 11.0 | 22418 | 5.8774 | 0.0045 |
| 2.9004 | 12.0 | 24456 | 6.1186 | 0.0047 |
| 2.5937 | 13.0 | 26494 | 6.2398 | 0.0046 |
| 2.2983 | 14.0 | 28532 | 6.3732 | 0.0049 |
| 2.0611 | 15.0 | 30570 | 6.5024 | 0.0045 |
| 1.8153 | 16.0 | 32608 | 6.6585 | 0.0047 |
| 1.6075 | 17.0 | 34646 | 6.8333 | 0.0043 |
| 1.4342 | 18.0 | 36684 | 6.9529 | 0.0044 |
| 1.2614 | 19.0 | 38722 | 7.1129 | 0.0046 |
| 1.1463 | 20.0 | 40760 | 7.1977 | 0.0039 |
| 1.0387 | 21.0 | 42798 | 7.2700 | 0.0044 |
| 0.9635 | 22.0 | 44836 | 7.3375 | 0.0040 |
| 0.8872 | 23.0 | 46874 | 7.4003 | 0.0039 |
| 0.8156 | 24.0 | 48912 | 7.4884 | 0.0039 |
| 0.7544 | 25.0 | 50950 | 7.4764 | 0.0039 |
| 0.6893 | 26.0 | 52988 | 7.5153 | 0.0042 |
| 0.6767 | 27.0 | 55026 | 7.5427 | 0.0043 |
| 0.6098 | 28.0 | 57064 | 7.5547 | 0.0042 |
| 0.5871 | 29.0 | 59102 | 7.5533 | 0.0041 |
| 0.5696 | 30.0 | 61140 | 7.5595 | 0.0041 |
### Framework versions
- Transformers 4.18.0.dev0
- Pytorch 1.10.2+cu102
- Datasets 1.18.3
- Tokenizers 0.11.6
|
{"license": "apache-2.0", "tags": ["image-classification", "generated_from_trainer"], "metrics": ["f1"], "base_model": "facebook/convnext-base-224-22k", "model-index": [{"name": "convnext_manuscript_iiif", "results": []}]}
|
davanstrien/convnext_manuscript_iiif
| null |
[
"transformers",
"pytorch",
"safetensors",
"convnext",
"image-classification",
"generated_from_trainer",
"base_model:facebook/convnext-base-224-22k",
"license:apache-2.0",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
object-detection
|
transformers
|
# detr_beyond_words (WIP)
[facebook/detr-resnet-50](https://huggingface.co/facebook/detr-resnet-50) fine-tuned on [Beyond Words](https://github.com/LibraryOfCongress/newspaper-navigator/tree/master/beyond_words_data).
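A minimal inference sketch using the standard DETR classes from `transformers` (the image URL comes from the widget entries in the card metadata; the confidence threshold is an assumption):
```python
import requests
import torch
from PIL import Image
from transformers import DetrForObjectDetection, DetrImageProcessor

url = "https://huggingface.co/davanstrien/detr_beyond_words/resolve/main/19.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = DetrImageProcessor.from_pretrained("davanstrien/detr_beyond_words")
model = DetrForObjectDetection.from_pretrained("davanstrien/detr_beyond_words")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Convert raw outputs to labelled boxes, keeping confident detections only.
target_sizes = torch.tensor([image.size[::-1]])
results = processor.post_process_object_detection(
    outputs, threshold=0.9, target_sizes=target_sizes
)[0]
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(model.config.id2label[label.item()], round(score.item(), 3), box.tolist())
```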
|
{"license": "mit", "tags": ["object-detection"], "widget": [{"src": "https://huggingface.co/davanstrien/detr_beyond_words/resolve/main/19.jpg", "example_title": "page"}, {"src": "https://huggingface.co/davanstrien/detr_beyond_words/resolve/main/65.jpg", "example_title": "page2"}]}
|
davanstrien/detr_beyond_words
| null |
[
"transformers",
"pytorch",
"tensorboard",
"safetensors",
"detr",
"object-detection",
"license:mit",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
null | null |
{}
|
davanstrien/distilbert-base-cased_tokenizer_1700_1799
| null |
[
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
null | null |
{}
|
davanstrien/distilroberta-base-finetuned-blbooks-1700s
| null |
[
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
null | null |
{}
|
davanstrien/distilroberta_1700_tok
| null |
[
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
null | null |
{}
|
davanstrien/distilroberta_1700_tokenizer
| null |
[
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
null | null |
{}
|
davanstrien/eighteenth-century-albert-tokenizer
| null |
[
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
fill-mask
|
transformers
|
{}
|
davanstrien/eighteenth-century-distilbert
| null |
[
"transformers",
"pytorch",
"distilbert",
"fill-mask",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
null | null |
# flyswot
## Model description
In-progress model for detecting 'fake' flysheets
## Intended uses & limitations
Not currently intended for public consumption...
#### Limitations and bias
Not currently intended for public consumption...
## Training data
TODO
## Eval results
|
{}
|
davanstrien/flyswot-test
| null |
[
"onnx",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
null | null |
TODO
## Model description
In-progress model for detecting 'fake' flysheets
## Intended uses & limitations
Not currently intended for public consumption...
## Limitations and bias
Not currently intended for public consumption...
## Training data
## Eval results
|
{}
|
davanstrien/flyswot
| null |
[
"onnx",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
image-classification
|
transformers
|
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# flyswot_iiif
This model is a fine-tuned version of [facebook/convnext-base-224-22k](https://huggingface.co/facebook/convnext-base-224-22k) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 6.1280
- F1: 0.0034
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 64
- eval_batch_size: 64
- seed: 666
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 8
- mixed_precision_training: Native AMP
- label_smoothing_factor: 0.1
### Training results
| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:-----:|:---------------:|:------:|
| 8.5184 | 0.26 | 500 | 7.9280 | 0.0005 |
| 7.7409 | 0.52 | 1000 | 7.5824 | 0.0007 |
| 7.4649 | 0.78 | 1500 | 7.3841 | 0.0010 |
| 7.3285 | 1.04 | 2000 | 7.2652 | 0.0012 |
| 7.1404 | 1.3 | 2500 | 7.1559 | 0.0014 |
| 7.0322 | 1.56 | 3000 | 7.0551 | 0.0016 |
| 6.9197 | 1.82 | 3500 | 6.9449 | 0.0019 |
| 6.7822 | 2.09 | 4000 | 6.8773 | 0.0018 |
| 6.6506 | 2.35 | 4500 | 6.7980 | 0.0020 |
| 6.5811 | 2.61 | 5000 | 6.7382 | 0.0022 |
| 6.538 | 2.87 | 5500 | 6.6582 | 0.0022 |
| 6.4136 | 3.13 | 6000 | 6.6013 | 0.0024 |
| 6.3325 | 3.39 | 6500 | 6.5369 | 0.0024 |
| 6.2566 | 3.65 | 7000 | 6.4875 | 0.0025 |
| 6.2285 | 3.91 | 7500 | 6.4342 | 0.0027 |
| 6.1281 | 4.17 | 8000 | 6.4066 | 0.0027 |
| 6.0762 | 4.43 | 8500 | 6.3674 | 0.0027 |
| 6.0309 | 4.69 | 9000 | 6.3336 | 0.0027 |
| 6.0123 | 4.95 | 9500 | 6.2932 | 0.0030 |
| 5.9089 | 5.21 | 10000 | 6.2835 | 0.0029 |
| 5.8901 | 5.47 | 10500 | 6.2481 | 0.0030 |
| 5.86 | 5.74 | 11000 | 6.2295 | 0.0030 |
| 5.8586 | 6.0 | 11500 | 6.2068 | 0.0033 |
| 5.7768 | 6.26 | 12000 | 6.1937 | 0.0031 |
| 5.7591 | 6.52 | 12500 | 6.1916 | 0.0032 |
| 5.7443 | 6.78 | 13000 | 6.1579 | 0.0033 |
| 5.7125 | 7.04 | 13500 | 6.1478 | 0.0033 |
| 5.6751 | 7.3 | 14000 | 6.1379 | 0.0035 |
| 5.6648 | 7.56 | 14500 | 6.1304 | 0.0035 |
| 5.6644 | 7.82 | 15000 | 6.1280 | 0.0034 |
### Framework versions
- Transformers 4.17.0.dev0
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.11.6
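For reference, a minimal sketch of how the hyperparameters listed above might be expressed with the standard `Trainer` API. This is a reconstruction from the card, not the original training script; the `output_dir` is a placeholder and dataset/metric code is omitted:
```python
from transformers import TrainingArguments

# Mirrors the hyperparameters reported in this card.
args = TrainingArguments(
    output_dir="flyswot_iiif",     # placeholder output directory
    learning_rate=2e-5,
    per_device_train_batch_size=64,
    per_device_eval_batch_size=64,
    seed=666,
    lr_scheduler_type="linear",
    num_train_epochs=8,
    fp16=True,                     # "mixed_precision_training: Native AMP"
    label_smoothing_factor=0.1,
)
```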
|
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "metrics": ["f1"], "base_model": "facebook/convnext-base-224-22k", "model-index": [{"name": "flyswot_iiif", "results": []}]}
|
davanstrien/flyswot_iiif
| null |
[
"transformers",
"pytorch",
"convnext",
"image-classification",
"generated_from_trainer",
"base_model:facebook/convnext-base-224-22k",
"license:apache-2.0",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
image-classification
|
transformers
|
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# flyswot_test
This model is a fine-tuned version of [facebook/convnext-base-224-22k](https://huggingface.co/facebook/convnext-base-224-22k) on the image_folder dataset.
It achieves the following results on the evaluation set:
- eval_loss: 0.1518
- eval_f1: 0.9595
- eval_runtime: 5.9337
- eval_samples_per_second: 69.603
- eval_steps_per_second: 2.191
- epoch: 7.0
- step: 364
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 666
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 40
- mixed_precision_training: Native AMP
### Framework versions
- Transformers 4.17.0.dev0
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.11.6
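A hedged inference sketch, assuming the checkpoint loads with the generic `image-classification` pipeline; `flysheet.jpg` is a placeholder path:
```python
from transformers import pipeline

classifier = pipeline("image-classification", model="davanstrien/flyswot_test")
# "flysheet.jpg" is a placeholder path for a digitised flysheet image.
print(classifier("flysheet.jpg"))
```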
|
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "datasets": ["image_folder"], "base_model": "facebook/convnext-base-224-22k", "model-index": [{"name": "flyswot_test", "results": []}]}
|
davanstrien/flyswot_test
| null |
[
"transformers",
"pytorch",
"convnext",
"image-classification",
"generated_from_trainer",
"dataset:image_folder",
"base_model:facebook/convnext-base-224-22k",
"license:apache-2.0",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
image-classification
|
transformers
|
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# iiif_manuscript_vit
This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.5684
- F1: 0.5996
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 64
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 10
- mixed_precision_training: Native AMP
- label_smoothing_factor: 0.1
### Training results
| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:-----:|:---------------:|:------:|
| 0.5639 | 1.0 | 2269 | 0.5822 | 0.5516 |
| 0.5834 | 2.0 | 4538 | 0.5825 | 0.5346 |
| 0.5778 | 3.0 | 6807 | 0.5794 | 0.6034 |
| 0.5735 | 4.0 | 9076 | 0.5742 | 0.5713 |
| 0.5731 | 5.0 | 11345 | 0.5745 | 0.6008 |
| 0.5701 | 6.0 | 13614 | 0.5729 | 0.5499 |
| 0.5696 | 7.0 | 15883 | 0.5717 | 0.5952 |
| 0.5683 | 8.0 | 18152 | 0.5680 | 0.6005 |
| 0.5648 | 9.0 | 20421 | 0.5679 | 0.5967 |
| 0.564 | 10.0 | 22690 | 0.5684 | 0.5996 |
### Framework versions
- Transformers 4.16.2
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.11.0
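A minimal sketch of loading the checkpoint directly, assuming a standard ViT image-classification head; the image path and the label set are assumptions, not part of the card:
```python
import torch
from PIL import Image
from transformers import AutoFeatureExtractor, AutoModelForImageClassification

model_id = "davanstrien/iiif_manuscript_vit"
extractor = AutoFeatureExtractor.from_pretrained(model_id)
model = AutoModelForImageClassification.from_pretrained(model_id)

# "manuscript_page.jpg" is a placeholder for a IIIF manuscript image.
image = Image.open("manuscript_page.jpg")
inputs = extractor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])
```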
|
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "metrics": ["f1"], "base_model": "google/vit-base-patch16-224-in21k", "model-index": [{"name": "iiif_manuscript_vit", "results": []}]}
|
davanstrien/iiif_manuscript_vit
| null |
[
"transformers",
"pytorch",
"vit",
"image-classification",
"generated_from_trainer",
"base_model:google/vit-base-patch16-224-in21k",
"license:apache-2.0",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
null |
generic
|
# TODO
-
-
-
-
|
{"library_name": "generic", "tags": ["chemistry"]}
|
davanstrien/test
| null |
[
"generic",
"chemistry",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
null | null |
{}
|
davanstrien/testgpt2
| null |
[
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
null | null |
{}
|
davanstrien/tokenizer
| null |
[
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
null |
transformers
|
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# vit-manuscripts
This model is a fine-tuned version of [facebook/vit-mae-base](https://huggingface.co/facebook/vit-mae-base) on the davanstrien/manuscript_iiif_test dataset.
It achieves the following results on the evaluation set:
- Loss: 0.5177
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 7.5e-05
- train_batch_size: 128
- eval_batch_size: 128
- seed: 1337
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_ratio: 0.05
- num_epochs: 1.0
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 0.5303 | 1.0 | 34 | 0.5134 |
### Framework versions
- Transformers 4.17.0.dev0
- Pytorch 1.10.0+cu111
- Datasets 1.18.2
- Tokenizers 0.11.0
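Since this is a masked-autoencoding checkpoint rather than a classifier, a hedged sketch of computing the reconstruction loss on a single image. `ViTMAEForPreTraining` is the class this checkpoint should map to if it follows the base model's architecture, and the image path is a placeholder:
```python
import torch
from PIL import Image
from transformers import AutoFeatureExtractor, ViTMAEForPreTraining

model_id = "davanstrien/vit-manuscripts"
extractor = AutoFeatureExtractor.from_pretrained(model_id)
model = ViTMAEForPreTraining.from_pretrained(model_id)

# "manuscript_page.jpg" is a placeholder for a manuscript page image.
image = Image.open("manuscript_page.jpg")
inputs = extractor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# Mean reconstruction loss over the masked patches.
print(outputs.loss.item())
```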
|
{"license": "apache-2.0", "tags": ["masked-auto-encoding", "generated_from_trainer"], "base_model": "facebook/vit-mae-base", "model-index": [{"name": "vit-manuscripts", "results": []}]}
|
davanstrien/vit-manuscripts
| null |
[
"transformers",
"pytorch",
"tensorboard",
"vit_mae",
"pretraining",
"masked-auto-encoding",
"generated_from_trainer",
"base_model:facebook/vit-mae-base",
"license:apache-2.0",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
null | null |
{}
|
davanstrien/vit_flyswot
| null |
[
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
image-classification
|
transformers
|
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# vit_flyswot_test
This model is a fine-tuned version of [](https://huggingface.co/) on the image_folder dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4777
- F1: 0.8492
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 666
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 10
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| No log | 1.0 | 52 | 1.2007 | 0.3533 |
| No log | 2.0 | 104 | 1.0037 | 0.5525 |
| No log | 3.0 | 156 | 0.8301 | 0.6318 |
| No log | 4.0 | 208 | 0.7224 | 0.6946 |
| No log | 5.0 | 260 | 0.7298 | 0.7145 |
| No log | 6.0 | 312 | 0.6328 | 0.7729 |
| No log | 7.0 | 364 | 0.6010 | 0.7992 |
| No log | 8.0 | 416 | 0.5174 | 0.8364 |
| No log | 9.0 | 468 | 0.5084 | 0.8479 |
| 0.6372 | 10.0 | 520 | 0.4777 | 0.8492 |
### Framework versions
- Transformers 4.17.0.dev0
- Pytorch 1.10.0+cu111
- Datasets 1.18.3
- Tokenizers 0.11.6
|
{"tags": ["generated_from_trainer"], "datasets": ["image_folder"], "metrics": ["f1"], "model-index": [{"name": "vit_flyswot_test", "results": [{"task": {"type": "image-classification", "name": "Image Classification"}, "dataset": {"name": "image_folder", "type": "image_folder", "args": "default"}, "metrics": [{"type": "f1", "value": 0.849172221610369, "name": "F1"}]}]}]}
|
davanstrien/vit_flyswot_test
| null |
[
"transformers",
"pytorch",
"vit",
"image-classification",
"generated_from_trainer",
"dataset:image_folder",
"model-index",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
text-classification
|
transformers
|
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# xlm-roberta-base-finetuned-marc-en
This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the amazon_reviews_multi dataset.
It achieves the following results on the evaluation set:
- Loss: 0.9199
- Mae: 0.4756
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2
### Training results
| Training Loss | Epoch | Step | Validation Loss | Mae |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 1.1705 | 1.0 | 235 | 0.9985 | 0.5854 |
| 0.9721 | 2.0 | 470 | 0.9199 | 0.4756 |
### Framework versions
- Transformers 4.11.3
- Pytorch 1.9.0+cu111
- Datasets 1.14.0
- Tokenizers 0.10.3
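A hedged usage sketch; the label names returned depend on the checkpoint's config, so the behaviour described in the comment is illustrative, not guaranteed:
```python
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="daveccampbell/xlm-roberta-base-finetuned-marc-en",
)
# Predicts a star-rating class for an English Amazon-style review.
print(classifier("This product exceeded my expectations."))
```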
|
{"license": "mit", "tags": ["generated_from_trainer"], "datasets": ["amazon_reviews_multi"], "model-index": [{"name": "xlm-roberta-base-finetuned-marc-en", "results": []}]}
|
daveccampbell/xlm-roberta-base-finetuned-marc-en
| null |
[
"transformers",
"pytorch",
"tensorboard",
"xlm-roberta",
"text-classification",
"generated_from_trainer",
"dataset:amazon_reviews_multi",
"license:mit",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
text-classification
|
transformers
|
**Note**: This model & model card are based on the [finetuned XLM-T for Sentiment Analysis](https://huggingface.co/cardiffnlp/twitter-xlm-roberta-base-sentiment)
# twitter-XLM-roBERTa-base for Emotion Analysis
This is an XLM-roBERTa-base model trained on ~198M tweets and fine-tuned for emotion analysis in Spanish. The model was submitted to the EmoEvalEs competition, part of the [IberLEF 2021 Conference](https://sites.google.com/view/iberlef2021/), where the proposed task was the classification of Spanish tweets into seven classes: *anger*, *disgust*, *fear*, *joy*, *sadness*, *surprise*, and *other*. We achieved first position in the competition with a macro-averaged F1 score of 71.70%.
- [Our code for EmoEvalEs submission](https://github.com/gsi-upm/emoevales-iberlef2021).
- [EmoEvalEs Dataset](https://github.com/pendrag/EmoEvalEs)
## Example Pipeline with a [Tweet from @JaSantaolalla](https://twitter.com/JaSantaolalla/status/1398383243645177860)
```python
from transformers import pipeline
model_path = "daveni/twitter-xlm-roberta-emotion-es"
emotion_analysis = pipeline("text-classification", framework="pt", model=model_path, tokenizer=model_path)
emotion_analysis("Einstein dijo: Solo hay dos cosas infinitas, el universo y los pinches anuncios de bitcoin en Twitter. Paren ya carajo aaaaaaghhgggghhh me quiero murir")
```
```
[{'label': 'anger', 'score': 0.48307016491889954}]
```
## Full classification example
```python
from transformers import AutoModelForSequenceClassification
from transformers import AutoTokenizer, AutoConfig
import numpy as np
from scipy.special import softmax
# Preprocess text (username and link placeholders)
def preprocess(text):
new_text = []
for t in text.split(" "):
t = '@user' if t.startswith('@') and len(t) > 1 else t
t = 'http' if t.startswith('http') else t
new_text.append(t)
return " ".join(new_text)
model_path = "daveni/twitter-xlm-roberta-emotion-es"
tokenizer = AutoTokenizer.from_pretrained(model_path)
config = AutoConfig.from_pretrained(model_path)
# Load the PyTorch model
model = AutoModelForSequenceClassification.from_pretrained(model_path)
text = "Se ha quedao bonito día para publicar vídeo, ¿no? Hoy del tema más diferente que hemos tocado en el canal."
text = preprocess(text)
print(text)
encoded_input = tokenizer(text, return_tensors='pt')
output = model(**encoded_input)
scores = output[0][0].detach().numpy()
scores = softmax(scores)
# Print labels and scores
ranking = np.argsort(scores)
ranking = ranking[::-1]
for i in range(scores.shape[0]):
l = config.id2label[ranking[i]]
s = scores[ranking[i]]
print(f"{i+1}) {l} {np.round(float(s), 4)}")
```
Output:
```
Se ha quedao bonito día para publicar vídeo, ¿no? Hoy del tema más diferente que hemos tocado en el canal.
1) joy 0.7887
2) others 0.1679
3) surprise 0.0152
4) sadness 0.0145
5) anger 0.0077
6) disgust 0.0033
7) fear 0.0027
```
#### Limitations and bias
- The dataset we used for fine-tuning was unbalanced: almost half of the records belonged to the *other* class, so there may be a bias towards that class.
## Training data
Pretrained weights were left identical to the original model released by [cardiffnlp](https://huggingface.co/cardiffnlp/twitter-xlm-roberta-base). We used the [EmoEvalEs Dataset](https://github.com/pendrag/EmoEvalEs) for fine-tuning.
### BibTeX entry and citation info
```bibtex
@inproceedings{vera2021gsi,
title={GSI-UPM at IberLEF2021: Emotion Analysis of Spanish Tweets by Fine-tuning the XLM-RoBERTa Language Model},
author={Vera, D and Araque, O and Iglesias, CA},
booktitle={Proceedings of the Iberian Languages Evaluation Forum (IberLEF 2021). CEUR Workshop Proceedings, CEUR-WS, M{\'a}laga, Spain},
year={2021}
}
```
|
{"language": ["es"], "tags": ["Emotion Analysis"]}
|
daveni/twitter-xlm-roberta-emotion-es
| null |
[
"transformers",
"pytorch",
"xlm-roberta",
"text-classification",
"Emotion Analysis",
"es",
"autotrain_compatible",
"endpoints_compatible",
"has_space",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
text-generation
|
transformers
|
{}
|
daveripper0020/essaygpt2
| null |
[
"transformers",
"pytorch",
"gpt2",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
null | null |
{}
|
daveynkanta/distilbert-base-uncased-finetuned-cola
| null |
[
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
null | null |
{}
|
daveynkanta/distilbert-base-uncased-finetuned-squad
| null |
[
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
feature-extraction
|
transformers
|
{}
|
davidcechak/CDNA_bert_6
| null |
[
"transformers",
"pytorch",
"bert",
"feature-extraction",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
feature-extraction
|
transformers
|
{}
|
davidcechak/tss_bert_6
| null |
[
"transformers",
"pytorch",
"bert",
"feature-extraction",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
feature-extraction
|
transformers
|
{}
|
davidcechak/tss_bert_6_v1
| null |
[
"transformers",
"pytorch",
"bert",
"feature-extraction",
"endpoints_compatible",
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|
|
null | null |
{}
|
davidsun/bert_finetuned
| null |
[
"region:us"
] | null |
2022-03-02T23:29:05+00:00
|