modelId | lastModified | tags | pipeline_tag | files | publishedBy | downloads_last_month | library | modelCard
---|---|---|---|---|---|---|---|---|
WikinewsSum/bert2bert-multi-fr-wiki-news | 2020-08-11T09:05:51.000Z | [
"pytorch",
"encoder-decoder",
"seq2seq",
"transformers",
"text2text-generation"
]
| text2text-generation | [
".gitattributes",
"config.json",
"eval_results.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.txt"
]
| WikinewsSum | 9 | transformers | |
WikinewsSum/t5-base-multi-combine-wiki-news | 2020-07-01T08:43:13.000Z | [
"pytorch",
"t5",
"seq2seq",
"transformers",
"text2text-generation"
]
| text2text-generation | [
".gitattributes",
"config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"spiece.model",
"tokenizer_config.json"
]
| WikinewsSum | 13 | transformers | |
WikinewsSum/t5-base-multi-de-wiki-news | 2020-07-01T08:29:08.000Z | [
"pytorch",
"t5",
"seq2seq",
"transformers",
"text2text-generation"
]
| text2text-generation | [
".gitattributes",
"config.json",
"eval_results.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"spiece.model",
"tokenizer_config.json",
"training_args.bin"
]
| WikinewsSum | 17 | transformers | |
WikinewsSum/t5-base-multi-en-wiki-news | 2020-07-01T08:32:21.000Z | [
"pytorch",
"t5",
"seq2seq",
"transformers",
"text2text-generation"
]
| text2text-generation | [
".gitattributes",
"config.json",
"eval_results.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"spiece.model",
"tokenizer_config.json",
"training_args.bin"
]
| WikinewsSum | 17 | transformers | |
WikinewsSum/t5-base-multi-fr-wiki-news | 2020-07-01T08:36:23.000Z | [
"pytorch",
"t5",
"seq2seq",
"transformers",
"text2text-generation"
]
| text2text-generation | [
".gitattributes",
"config.json",
"eval_results.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"spiece.model",
"tokenizer_config.json",
"training_args.bin"
]
| WikinewsSum | 18 | transformers | |
WikinewsSum/t5-base-with-title-multi-de-wiki-news | 2020-07-01T08:30:44.000Z | [
"pytorch",
"t5",
"seq2seq",
"transformers",
"text2text-generation"
]
| text2text-generation | [
".gitattributes",
"config.json",
"eval_results.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"spiece.model",
"tokenizer_config.json",
"training_args.bin"
]
| WikinewsSum | 23 | transformers | |
WikinewsSum/t5-base-with-title-multi-en-wiki-news | 2020-07-01T08:33:48.000Z | [
"pytorch",
"t5",
"seq2seq",
"transformers",
"text2text-generation"
]
| text2text-generation | [
".gitattributes",
"config.json",
"eval_results.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"spiece.model",
"tokenizer_config.json",
"training_args.bin"
]
| WikinewsSum | 20 | transformers | |
WikinewsSum/t5-base-with-title-multi-fr-wiki-news | 2020-07-01T08:39:51.000Z | [
"pytorch",
"t5",
"seq2seq",
"transformers",
"text2text-generation"
]
| text2text-generation | [
".gitattributes",
"config.json",
"eval_results.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"spiece.model",
"tokenizer_config.json",
"training_args.bin"
]
| WikinewsSum | 15 | transformers | |
Wintermute/Wintermute | 2021-05-21T11:40:58.000Z | [
"pytorch",
"jax",
"gpt2",
"lm-head",
"causal-lm",
"transformers",
"text-generation"
]
| text-generation | [
".gitattributes",
"added_tokens.json",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.json",
"vocab.txt"
]
| Wintermute | 66 | transformers | |
Wintermute/Wintermute_extended | 2021-05-21T11:42:01.000Z | [
"pytorch",
"jax",
"gpt2",
"lm-head",
"causal-lm",
"transformers",
"text-generation"
]
| text-generation | [
".gitattributes",
"added_tokens.json",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.json",
"vocab.txt"
]
| Wintermute | 17 | transformers | |
Wonjun/KPTBert | 2021-05-19T11:33:17.000Z | [
"bert",
"masked-lm",
"transformers",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"config.json"
]
| Wonjun | 7 | transformers | |
Xia/albert | 2021-04-28T17:45:16.000Z | []
| [
".gitattributes"
]
| Xia | 0 | |||
XiangPan/roberta_squad1_2epoch | 2021-04-27T01:57:07.000Z | []
| [
".gitattributes"
]
| XiangPan | 0 | |||
Xiaomaxiang/T5-base-question-generation-squad | 2021-05-11T16:42:26.000Z | []
| [
".gitattributes",
"README.md"
]
| Xiaomaxiang | 0 | A |
||
XiaoqiJiao/2nd_General_TinyBERT_6L_768D | 2020-09-02T03:03:02.000Z | [
"pytorch",
"transformers"
]
| [
".gitattributes",
"config.json",
"pytorch_model.bin",
"tinybert_overview.png",
"vocab.txt"
]
| XiaoqiJiao | 22 | transformers | ||
XiaoqiJiao/TinyBERT_General_4L_312D | 2020-09-02T03:37:19.000Z | [
"pytorch",
"transformers"
]
| [
".gitattributes",
"config.json",
"pytorch_model.bin",
"tinybert_overview.png",
"vocab.txt"
]
| XiaoqiJiao | 17 | transformers | ||
XiaoqiJiao/TinyBERT_General_6L_768D | 2020-09-02T03:40:56.000Z | [
"pytorch",
"transformers"
]
| [
".gitattributes",
"config.json",
"pytorch_model.bin",
"tinybert_overview.png",
"vocab.txt"
]
| XiaoqiJiao | 19 | transformers | ||
XxProKillerxX/Meh | 2021-04-15T21:38:50.000Z | []
| [
".gitattributes"
]
| XxProKillerxX | 0 | |||
YSKartal/berturk-social-5m | 2021-05-20T12:32:31.000Z | [
"pytorch",
"jax",
"roberta",
"masked-lm",
"transformers",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"training_args.bin",
"vocab.json"
]
| YSKartal | 26 | transformers | |
YacShin/LocationAddressV1 | 2021-02-01T08:22:16.000Z | []
| [
".gitattributes"
]
| YacShin | 0 | |||
Yanzhu/bertweetfr-base | 2021-06-13T07:20:37.000Z | [
"pytorch",
"camembert",
"masked-lm",
"fr",
"transformers",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"README.md",
"config.json",
"pytorch_model.bin",
"sentencepiece.bpe.model",
"special_tokens_map.json",
"tokenizer.json",
"tokenizer_config.json"
]
| Yanzhu | 25 | transformers | ---
language: "fr"
---
Domain-adaptive pretraining of camembert-base using 15 GB of French Tweets |
YituTech/conv-bert-base | 2021-02-24T11:26:14.000Z | [
"pytorch",
"tf",
"convbert",
"transformers"
]
| [
".gitattributes",
"config.json",
"pytorch_model.bin",
"tf_model.h5",
"vocab.txt"
]
| YituTech | 1,460 | transformers | ||
YituTech/conv-bert-medium-small | 2021-02-24T11:24:27.000Z | [
"pytorch",
"tf",
"convbert",
"transformers"
]
| [
".gitattributes",
"config.json",
"pytorch_model.bin",
"tf_model.h5",
"vocab.txt"
]
| YituTech | 236 | transformers | ||
YituTech/conv-bert-small | 2021-02-24T11:26:46.000Z | [
"pytorch",
"tf",
"convbert",
"transformers"
]
| [
".gitattributes",
"config.json",
"pytorch_model.bin",
"tf_model.h5",
"vocab.txt"
]
| YituTech | 689 | transformers | ||
Yongqi/gru_bidaf | 2021-05-10T06:50:07.000Z | []
| [
".gitattributes"
]
| Yongqi | 0 | |||
Yotam/new | 2021-05-14T04:24:53.000Z | []
| [
".gitattributes",
"README.md"
]
| Yotam | 0 | |||
Yunus/mymodel | 2020-11-17T04:22:01.000Z | []
| [
".gitattributes",
"README.md"
]
| Yunus | 0 | hello
|
||
Yuriy/wer | 2021-04-11T00:22:26.000Z | []
| [
".gitattributes"
]
| Yuriy | 0 | |||
Yuuryoku/Junko_Enoshima | 2021-06-05T02:46:15.000Z | []
| [
".gitattributes"
]
| Yuuryoku | 0 | |||
Yves/wav2vec2-large-xlsr-53-swiss-german | 2021-06-11T15:45:30.000Z | [
"pytorch",
"wav2vec2",
"sg",
"dataset:Yves/fhnw_swiss_parliament",
"transformers",
"audio",
"speech",
"automatic-speech-recognition",
"xlsr-fine-tuning-week",
"PyTorch",
"license:apache-2.0"
]
| automatic-speech-recognition | [
".gitattributes",
"README.md",
"config.json",
"preprocessor_config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.json"
]
| Yves | 135 | transformers | ---
language: sg
datasets:
- Yves/fhnw_swiss_parliament
metrics:
- wer
tags:
- audio
- speech
- wav2vec2
- sg
- automatic-speech-recognition
- speech
- xlsr-fine-tuning-week
- PyTorch
license: apache-2.0
model-index:
- name: Yves XLSR Wav2Vec2 Large 53 Swiss German
results:
- task:
name: Speech Recognition
type: automatic-speech-recognition
dataset:
name: Yves/fhnw_swiss_parliament
type: Yves/fhnw_swiss_parliament
metrics:
- name: Test WER
type: wer
value: NA%
---
# wav2vec2-large-xlsr-53-swiss-german
Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Swiss German, aiming for satisfactory Swiss-German-to-German transcriptions.
## Dataset
Detailed information about the dataset that the model has been trained and validated with is available on [Yves/fhnw_swiss_parliament](https://huggingface.co/datasets/Yves/fhnw_swiss_parliament)
## Usage
The model can be used directly (without a language model) as follows:
```python
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

test_dataset = load_dataset("Yves/fhnw_swiss_parliament", data_dir="swiss_parliament", split="validation")
processor = Wav2Vec2Processor.from_pretrained("Yves/wav2vec2-large-xlsr-53-swiss-german")
model = Wav2Vec2ForCTC.from_pretrained("Yves/wav2vec2-large-xlsr-53-swiss-german").cuda()
resampler = torchaudio.transforms.Resample(48_000, 16_000)

def speech_file_to_array_fn(batch):
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)

with torch.no_grad():
    logits = model(inputs.input_values.cuda(), attention_mask=inputs.attention_mask).logits

predicted_ids = torch.argmax(logits, dim=-1)
print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"])
```
## Evaluation
```python
import torchaudio
from datasets import load_dataset, load_metric
from transformers import (
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
)
import torch
import re
import sys
import csv

model_name = "Yves/wav2vec2-large-xlsr-53-swiss-german"
device = "cuda"
chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\%\‘\”\_\²\…\˟\&\+\[\]\(\−\–\)\›\»\‹\@\«\*\ʼ\/\°\'\'\’\'̈]'
completed_iterations = 0
eval_batch_size = 16

model = Wav2Vec2ForCTC.from_pretrained(model_name).to(device)
processor = Wav2Vec2Processor.from_pretrained(model_name)

ds = load_dataset("Yves/fhnw_swiss_parliament", data_dir="container_0/swiss_parliament_dryrun", split="validation")

wer = load_metric("wer")
cer = load_metric("cer")
bleu = load_metric("sacrebleu")

resampler = torchaudio.transforms.Resample(orig_freq=48_000, new_freq=16_000)

def map_to_array(batch):
    speech, _ = torchaudio.load(batch["path"])
    batch["speech"] = resampler.forward(speech.squeeze(0)).numpy()
    batch["sampling_rate"] = resampler.new_freq
    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower().replace("’", "'")
    return batch

ds = ds.map(map_to_array)

out_file = open('output.tsv', 'w', encoding='utf-8')
tsv_writer = csv.writer(out_file, delimiter='\t')
tsv_writer.writerow(["client_id", "reference", "prediction", "wer", "cer", "bleu"])

def map_to_pred(batch, idx):
    features = processor(batch["speech"], sampling_rate=batch["sampling_rate"][0], padding=True, return_tensors="pt")
    input_values = features.input_values.to(device)
    attention_mask = features.attention_mask.to(device)
    with torch.no_grad():
        logits = model(input_values, attention_mask=attention_mask).logits
    pred_ids = torch.argmax(logits, dim=-1)
    batch["predicted"] = processor.batch_decode(pred_ids)
    batch["target"] = batch["sentence"]
    if not (len(idx) <= 2 and idx[0] == 0):
        for x in range(0, len(idx)):
            temp_reference = []
            temp_reference.append([batch["target"][x]])
            tsv_writer.writerow([batch["client_id"][x], batch["target"][x], batch["predicted"][x],
                                 wer.compute(predictions=[batch["predicted"][x]], references=[batch["sentence"][x]]),
                                 cer.compute(predictions=[batch["predicted"][x]], references=[batch["sentence"][x]]),
                                 bleu.compute(predictions=[batch["predicted"][x]], references=temp_reference)["score"]])
    return batch

result = ds.map(map_to_pred, batched=True, batch_size=eval_batch_size, with_indices=True, remove_columns=list(ds.features.keys()))
out_file.close()

target_bleu = []
for x in result["target"]:
    target_bleu.append([x])

print(wer.compute(predictions=result["predicted"], references=result["target"]))
print(cer.compute(predictions=result["predicted"], references=result["target"]))
print(bleu.compute(predictions=result["predicted"], references=target_bleu))
```
## Scripts
The script used for training can be found on Google Colab [TBD](https://huggingface.co/Yves/wav2vec2-large-xlsr-53-swiss-german) |
ZYW/Xquad | 2021-05-26T02:42:47.000Z | []
| [
".gitattributes"
]
| ZYW | 0 | |||
ZYW/en-de-es-model | 2021-05-29T17:28:09.000Z | [
"pytorch",
"distilbert",
"question-answering",
"transformers"
]
| question-answering | [
".gitattributes",
"README.md",
"config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer.json",
"tokenizer_config.json",
"vocab.txt"
]
| ZYW | 10 | transformers | ---
model-index:
- name: en-de-es-model
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# en-de-es-model
This model was trained from scratch on an unknown dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
### Framework versions
- Transformers 4.6.1
- Pytorch 1.8.1+cu101
- Datasets 1.7.0
- Tokenizers 0.10.3
|
ZYW/en-de-model | 2021-05-29T17:52:17.000Z | [
"pytorch",
"distilbert",
"question-answering",
"transformers"
]
| question-answering | [
".gitattributes",
"README.md",
"config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer.json",
"tokenizer_config.json",
"vocab.txt"
]
| ZYW | 11 | transformers | ---
model-index:
- name: en-de-model
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# en-de-model
This model was trained from scratch on an unknown dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
### Framework versions
- Transformers 4.6.1
- Pytorch 1.8.1+cu101
- Datasets 1.7.0
- Tokenizers 0.10.3
|
ZYW/en-de-vi-zh-es-model | 2021-05-29T17:33:12.000Z | [
"pytorch",
"distilbert",
"question-answering",
"transformers"
]
| question-answering | [
".gitattributes",
"README.md",
"config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer.json",
"tokenizer_config.json",
"vocab.txt"
]
| ZYW | 19 | transformers | ---
model-index:
- name: en-de-vi-zh-es-model
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# en-de-vi-zh-es-model
This model was trained from scratch on an unknown dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
### Framework versions
- Transformers 4.6.1
- Pytorch 1.8.1+cu101
- Datasets 1.7.0
- Tokenizers 0.10.3
|
ZYW/squad-en-de-es-model | 2021-05-29T16:53:56.000Z | [
"pytorch",
"distilbert",
"question-answering",
"transformers"
]
| question-answering | [
".gitattributes",
"README.md",
"config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer.json",
"tokenizer_config.json",
"vocab.txt"
]
| ZYW | 12 | transformers | ---
model-index:
- name: squad-en-de-es-model
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# squad-en-de-es-model
This model was trained from scratch on an unknown dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
### Framework versions
- Transformers 4.6.1
- Pytorch 1.8.1+cu101
- Datasets 1.7.0
- Tokenizers 0.10.3
|
ZYW/squad-en-de-es-vi-zh-model | 2021-05-29T21:46:39.000Z | [
"pytorch",
"distilbert",
"question-answering",
"transformers"
]
| question-answering | [
".gitattributes",
"README.md",
"config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer.json",
"tokenizer_config.json",
"vocab.txt"
]
| ZYW | 23 | transformers | ---
model-index:
- name: squad-en-de-es-vi-zh-model
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# squad-en-de-es-vi-zh-model
This model was trained from scratch on an unknown dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
### Framework versions
- Transformers 4.6.1
- Pytorch 1.8.1+cu101
- Datasets 1.7.0
- Tokenizers 0.10.3
|
ZYW/squad-mbart-model | 2021-05-30T16:12:15.000Z | [
"pytorch",
"mbart",
"question-answering",
"transformers"
]
| question-answering | [
".gitattributes",
"README.md",
"config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer.json",
"tokenizer_config.json",
"vocab.txt"
]
| ZYW | 37 | transformers | ---
model-index:
- name: squad-mbart-model
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# squad-mbart-model
This model was trained from scratch on an unknown dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
### Framework versions
- Transformers 4.6.1
- Pytorch 1.8.1+cu101
- Datasets 1.7.0
- Tokenizers 0.10.3
|
ZYW/squad-mbert-en-de-es-model | 2021-05-30T22:33:10.000Z | [
"pytorch",
"bert",
"question-answering",
"transformers"
]
| question-answering | [
".gitattributes",
"README.md",
"config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer.json",
"tokenizer_config.json",
"vocab.txt"
]
| ZYW | 12 | transformers | ---
model-index:
- name: squad-mbert-en-de-es-model
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# squad-mbert-en-de-es-model
This model was trained from scratch on an unknown dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2
### Training results
### Framework versions
- Transformers 4.6.1
- Pytorch 1.8.1+cu101
- Datasets 1.7.0
- Tokenizers 0.10.3
|
ZYW/squad-mbert-en-de-es-vi-zh-model | 2021-05-31T05:43:16.000Z | [
"pytorch",
"bert",
"question-answering",
"transformers"
]
| question-answering | [
".gitattributes",
"README.md",
"config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer.json",
"tokenizer_config.json",
"vocab.txt"
]
| ZYW | 14 | transformers | ---
model-index:
- name: squad-mbert-en-de-es-vi-zh-model
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# squad-mbert-en-de-es-vi-zh-model
This model was trained from scratch on an unknown dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 1
- eval_batch_size: 1
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2
### Training results
### Framework versions
- Transformers 4.6.1
- Pytorch 1.8.1+cu101
- Datasets 1.7.0
- Tokenizers 0.10.3
|
ZYW/squad-mbert-model | 2021-05-30T15:15:53.000Z | [
"pytorch",
"bert",
"question-answering",
"transformers"
]
| question-answering | [
".gitattributes",
"README.md",
"config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer.json",
"tokenizer_config.json",
"vocab.txt"
]
| ZYW | 30 | transformers | ---
model-index:
- name: squad-mbert-model
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# squad-mbert-model
This model was trained from scratch on an unknown dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2
### Training results
### Framework versions
- Transformers 4.6.1
- Pytorch 1.8.1+cu101
- Datasets 1.7.0
- Tokenizers 0.10.3
|
ZYW/squad-mbert-model_2 | 2021-05-30T18:18:37.000Z | [
"pytorch",
"bert",
"question-answering",
"transformers"
]
| question-answering | [
".gitattributes",
"README.md",
"config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer.json",
"tokenizer_config.json",
"vocab.txt"
]
| ZYW | 6 | transformers | ---
model-index:
- name: squad-mbert-model_2
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# squad-mbert-model_2
This model was trained from scratch on an unknown dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
### Training results
### Framework versions
- Transformers 4.6.1
- Pytorch 1.8.1+cu101
- Datasets 1.7.0
- Tokenizers 0.10.3
|
ZYW/test-squad-trained | 2021-05-26T02:38:39.000Z | [
"pytorch",
"distilbert",
"question-answering",
"transformers"
]
| question-answering | [
".gitattributes",
"README.md",
"config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer.json",
"tokenizer_config.json",
"vocab.txt"
]
| ZYW | 32 | transformers | ---
model-index:
- name: test-squad-trained
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# test-squad-trained
This model was trained from scratch on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 1.2026
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 0.988 | 1.0 | 5486 | 1.1790 |
| 0.7793 | 2.0 | 10972 | 1.2026 |
| 0.8068 | 3.0 | 16458 | 1.2026 |
### Framework versions
- Transformers 4.6.1
- Pytorch 1.8.1+cu101
- Datasets 1.6.2
- Tokenizers 0.10.3
|
Zaid/wav2vec2-large-xlsr-53-arabic-egyptian | 2021-03-22T07:28:09.000Z | [
"pytorch",
"wav2vec2",
"???",
"dataset:common_voice",
"transformers",
"audio",
"automatic-speech-recognition",
"speech",
"xlsr-fine-tuning-week",
"license:apache-2.0"
]
| automatic-speech-recognition | [
".gitattributes",
"README.md",
"config.json",
"dialects_speech_corpus.py",
"optimizer.pt",
"preprocessor_config.json",
"pytorch_model.bin",
"scheduler.pt",
"special_tokens_map.json",
"tokenizer_config.json",
"trainer_state.json",
"training_args.bin",
"vocab.json"
]
| Zaid | 312 | transformers | ---
language: ???
datasets:
- common_voice
tags:
- audio
- automatic-speech-recognition
- speech
- xlsr-fine-tuning-week
license: apache-2.0
model-index:
- name: XLSR Wav2Vec2 Arabic Egyptian by Zaid
results:
- task:
name: Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice ???
type: common_voice
args: ???
metrics:
- name: Test WER
type: wer
value: ???
---
# Wav2Vec2-Large-XLSR-53-Arabic-Egyptian
Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Egyptian Arabic using the [Common Voice](https://huggingface.co/datasets/common_voice) dataset.
When using this model, make sure that your speech input is sampled at 16kHz.
## Usage
The model can be used directly (without a language model) as follows:
```python
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

test_dataset = load_dataset("common_voice", "???", split="test[:2%]")
processor = Wav2Vec2Processor.from_pretrained("Zaid/wav2vec2-large-xlsr-53-arabic-egyptian")
model = Wav2Vec2ForCTC.from_pretrained("Zaid/wav2vec2-large-xlsr-53-arabic-egyptian")
resampler = torchaudio.transforms.Resample(48_000, 16_000)

# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)

with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits

predicted_ids = torch.argmax(logits, dim=-1)
print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
```
## Evaluation
The model can be evaluated as follows on the Egyptian Arabic test data of Common Voice.
```python
import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re

test_dataset = load_dataset("common_voice", "???", split="test")
wer = load_metric("wer")
processor = Wav2Vec2Processor.from_pretrained("Zaid/wav2vec2-large-xlsr-53-arabic-egyptian")
model = Wav2Vec2ForCTC.from_pretrained("Zaid/wav2vec2-large-xlsr-53-arabic-egyptian")
model.to("cuda")

chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“]'
resampler = torchaudio.transforms.Resample(48_000, 16_000)

# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech_array).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)

# Run inference batch by batch and collect the predicted strings
def evaluate(batch):
    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
    pred_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(pred_ids)
    return batch

result = test_dataset.map(evaluate, batched=True, batch_size=8)
print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```
**Test Result**: ??? %
## Training
The Common Voice `train` and `validation` datasets were used for training.
The script used for training can be found ??? |
Zaid/wav2vec2-large-xlsr-dialect-classification | 2021-04-06T16:10:38.000Z | [
"pytorch",
"wav2vec2",
"transformers"
]
| [
".gitattributes",
"config.json",
"preprocessor_config.json",
"pytorch_model.bin",
"trainer_state.json",
"training_args.bin"
]
| Zaid | 194 | transformers | ||
ZiweiG/ziwei-bert-imdb | 2021-05-18T22:52:12.000Z | [
"pytorch",
"jax",
"bert",
"text-classification",
"transformers"
]
| text-classification | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.txt"
]
| ZiweiG | 17 | transformers | |
ZiweiG/ziwei-bertimdb-prob | 2021-05-18T22:53:05.000Z | [
"pytorch",
"jax",
"bert",
"text-classification",
"transformers"
]
| text-classification | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.txt"
]
| ZiweiG | 16 | transformers | |
Zoe/model_covid | 2021-01-28T23:30:52.000Z | []
| [
".gitattributes"
]
| Zoe | 0 | |||
Zwrok/Start | 2021-03-12T17:50:11.000Z | []
| [
".gitattributes"
]
| Zwrok | 0 | |||
a-ware/bart-squadv2 | 2020-12-11T21:30:58.000Z | [
"pytorch",
"bart",
"question-answering",
"dataset:squad_v2",
"arxiv:1910.13461",
"transformers"
]
| question-answering | [
".gitattributes",
"README.md",
"config.json",
"merges.txt",
"model_args.json",
"nbest_predictions_test.json",
"null_odds_test.json",
"predictions_test.json",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.json"
]
| a-ware | 122 | transformers | ---
datasets:
- squad_v2
---
# BART-LARGE finetuned on SQuADv2
This is the bart-large model fine-tuned on the SQuADv2 dataset for the question answering task.
## Model details
BART was proposed in the [paper](https://arxiv.org/abs/1910.13461) **BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension**.
BART is a seq2seq model intended for both NLG and NLU tasks.
To use BART for question answering tasks, we feed the complete document into the encoder and decoder, and use the top
hidden state of the decoder as a representation for each
word. This representation is used to classify each token. As reported in the paper, bart-large achieves results comparable to RoBERTa on SQuAD.
Another notable thing about BART is that it can handle sequences of up to 1024 tokens.
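The span-classification setup described above can be sketched roughly as follows (an illustrative toy example only, not the actual BART implementation; the tensor sizes and the `qa_head` layer are made-up stand-ins):
```python3
import torch
import torch.nn as nn

batch, seq_len, hidden_size = 1, 8, 16                      # tiny illustrative sizes
decoder_hidden = torch.randn(batch, seq_len, hidden_size)   # top decoder state, one vector per token

qa_head = nn.Linear(hidden_size, 2)                         # projects each token to a start score and an end score
start_logits, end_logits = qa_head(decoder_hidden).split(1, dim=-1)

# the predicted answer span runs from the highest-scoring start token to the highest-scoring end token
answer_start = start_logits.squeeze(-1).argmax(dim=-1)
answer_end = end_logits.squeeze(-1).argmax(dim=-1)
```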
| Param | #Value |
|---------------------|--------|
| encoder layers | 12 |
| decoder layers | 12 |
| hidden size | 4096 |
| num attention heads | 16 |
| on disk size | 1.63GB |
## Model training
This model was trained with the following parameters using the simpletransformers wrapper:
```
train_args = {
    'learning_rate': 1e-5,
    'max_seq_length': 512,
    'doc_stride': 512,
    'overwrite_output_dir': True,
    'reprocess_input_data': False,
    'train_batch_size': 8,
    'num_train_epochs': 2,
    'gradient_accumulation_steps': 2,
    'no_cache': True,
    'use_cached_eval_features': False,
    'save_model_every_epoch': False,
    'output_dir': "bart-squadv2",
    'eval_batch_size': 32,
    'fp16_opt_level': 'O2',
}
```
[You can even train your own model using this colab notebook](https://colab.research.google.com/drive/1I5cK1M_0dLaf5xoewh6swcm5nAInfwHy?usp=sharing)
## Results
```{"correct": 6832, "similar": 4409, "incorrect": 632, "eval_loss": -14.950117511952177}```
## Model in Action 🚀
```python3
from transformers import BartTokenizer, BartForQuestionAnswering
import torch
tokenizer = BartTokenizer.from_pretrained('a-ware/bart-squadv2')
model = BartForQuestionAnswering.from_pretrained('a-ware/bart-squadv2')
question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
encoding = tokenizer(question, text, return_tensors='pt')
input_ids = encoding['input_ids']
attention_mask = encoding['attention_mask']
start_scores, end_scores = model(input_ids, attention_mask=attention_mask, output_attentions=False)[:2]
all_tokens = tokenizer.convert_ids_to_tokens(input_ids[0])
answer = ' '.join(all_tokens[torch.argmax(start_scores) : torch.argmax(end_scores)+1])
answer = tokenizer.convert_tokens_to_ids(answer.split())
answer = tokenizer.decode(answer)
#answer => 'a nice puppet'
```
> Created with ❤️ by A-ware UG [](https://github.com/aware-ai)
|
a-ware/distilbart-xsum-12-3-squadv2 | 2020-06-26T21:05:39.000Z | [
"pytorch",
"bart",
"question-answering",
"transformers"
]
| question-answering | [
".gitattributes",
"config.json",
"merges.txt",
"model_args.json",
"optimizer.pt",
"pytorch_model.bin",
"scheduler.pt",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.json"
]
| a-ware | 18 | transformers | |
a-ware/distilbart-xsum-12-6-squadv2 | 2020-06-28T11:04:49.000Z | [
"pytorch",
"bart",
"question-answering",
"transformers"
]
| question-answering | [
".gitattributes",
"config.json",
"merges.txt",
"model_args.json",
"optimizer.pt",
"pytorch_model.bin",
"scheduler.pt",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.json"
]
| a-ware | 15 | transformers | |
a-ware/longformer-QA | 2020-08-07T09:40:36.000Z | [
"pytorch",
"tf",
"longformer",
"question-answering",
"transformers"
]
| question-answering | [
".gitattributes",
"config.json",
"merges.txt",
"model_args.json",
"optimizer.pt",
"pytorch_model.bin",
"scheduler.pt",
"special_tokens_map.json",
"tf_model.h5",
"tokenizer_config.json",
"training_args.bin",
"vocab.json"
]
| a-ware | 50 | transformers | |
a-ware/longformer-squadv2 | 2020-08-07T11:30:59.000Z | [
"pytorch",
"tf",
"longformer",
"question-answering",
"transformers"
]
| question-answering | [
".gitattributes",
"config.json",
"merges.txt",
"model_args.json",
"nbest_predictions_test.json",
"null_odds_test.json",
"optimizer.pt",
"predictions_test.json",
"pytorch_model.bin",
"scheduler.pt",
"special_tokens_map.json",
"tf_model.h5",
"tokenizer_config.json",
"training_args.bin",
"vocab.json"
]
| a-ware | 34 | transformers | |
a-ware/mobilebert-squadv2 | 2020-06-30T21:58:56.000Z | [
"pytorch",
"tfsavedmodel",
"mobilebert",
"question-answering",
"transformers"
]
| question-answering | [
".gitattributes",
"config.json",
"model_args.json",
"nbest_predictions_test.json",
"null_odds_test.json",
"predictions_test.json",
"pytorch_model.bin",
"saved_model.tar.gz",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.txt"
]
| a-ware | 27 | transformers | |
a-ware/roberta-large-squad-classification | 2021-05-20T12:35:01.000Z | [
"pytorch",
"jax",
"roberta",
"text-classification",
"dataset:squad_v2",
"transformers"
]
| text-classification | [
".gitattributes",
"README.md",
"config.json",
"eval_results.txt",
"flax_model.msgpack",
"merges.txt",
"model_args.json",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.json"
]
| a-ware | 141 | transformers | ---
datasets:
- squad_v2
---
# Roberta-LARGE finetuned on SQuADv2
This is the roberta-large model fine-tuned on the SQuADv2 dataset for question-answering answerability classification.
## Model details
This model is simply a sequence classification model that takes two inputs (context and question) in a list.
The result is either [1] if the question is answerable from the context or [0] if it is not.
It was trained for 4 epochs on the SQuADv2 dataset and can be used to filter out contexts that are unlikely to yield an answer before passing them to a QA model, avoiding bad answers.
## Model training
This model was trained with the following parameters using the simpletransformers wrapper:
```
train_args = {
    'learning_rate': 1e-5,
    'max_seq_length': 512,
    'overwrite_output_dir': True,
    'reprocess_input_data': False,
    'train_batch_size': 4,
    'num_train_epochs': 4,
    'gradient_accumulation_steps': 2,
    'no_cache': True,
    'use_cached_eval_features': False,
    'save_model_every_epoch': False,
    'output_dir': "bart-squadv2",
    'eval_batch_size': 8,
    'fp16_opt_level': 'O2',
}
```
## Results
```{"accuracy": 90.48%}```
## Model in Action 🚀
```python3
from simpletransformers.classification import ClassificationModel
model = ClassificationModel('roberta', 'a-ware/roberta-large-squadv2', num_labels=2, args=train_args)
predictions, raw_outputs = model.predict([["my dog is an year old. he loves to go into the rain", "how old is my dog ?"]])
print(predictions)
==> [1]
```
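The same answerability check can also be sketched with the plain `transformers` API (a minimal sketch: the `(context, question)` pair order mirrors the simpletransformers example above, and the 0/1 label mapping follows the description in this card; verify both against the checkpoint before relying on them):
```python3
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_id = "a-ware/roberta-large-squad-classification"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

context = "my dog is an year old. he loves to go into the rain"
question = "how old is my dog ?"

# encode the (context, question) pair and pick the higher-scoring class
inputs = tokenizer(context, question, return_tensors="pt", truncation=True, max_length=512)
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.argmax(dim=-1).item())  # expected: 1 (answerable), per the example above
```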
> Created with ❤️ by A-ware UG [](https://github.com/aware-ai)
|
a-ware/roberta-large-squadv2 | 2021-05-20T12:37:36.000Z | [
"pytorch",
"jax",
"tfsavedmodel",
"roberta",
"question-answering",
"transformers"
]
| question-answering | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"merges.txt",
"model_args.json",
"nbest_predictions_test.json",
"null_odds_test.json",
"predictions_test.json",
"pytorch_model.bin",
"saved_model.tar.gz",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.json"
]
| a-ware | 954 | transformers | |
a-ware/xlmroberta-QA | 2020-07-07T10:05:15.000Z | [
"pytorch",
"xlm-roberta",
"question-answering",
"transformers"
]
| question-answering | [
".gitattributes",
"config.json",
"model_args.json",
"optimizer.pt",
"pytorch_model.bin",
"scheduler.pt",
"sentencepiece.bpe.model",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin"
]
| a-ware | 24 | transformers | |
a-ware/xlmroberta-squadv2 | 2020-12-11T21:31:05.000Z | [
"pytorch",
"xlm-roberta",
"question-answering",
"dataset:squad_v2",
"arxiv:1911.02116",
"transformers"
]
| question-answering | [
".gitattributes",
"README.md",
"config.json",
"model_args.json",
"nbest_predictions_test.json",
"null_odds_test.json",
"optimizer.pt",
"predictions_test.json",
"pytorch_model.bin",
"scheduler.pt",
"sentencepiece.bpe.model",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin"
]
| a-ware | 181 | transformers | ---
datasets:
- squad_v2
---
# XLM-ROBERTA-LARGE finetuned on SQuADv2
This is the xlm-roberta-large model fine-tuned on the SQuADv2 dataset for the question answering task.
## Model details
XLM-RoBERTa was proposed in the [paper](https://arxiv.org/pdf/1911.02116.pdf) **XLM-R: State-of-the-art cross-lingual understanding through self-supervision**.
## Model training
This model was trained with the following parameters using the simpletransformers wrapper:
```
train_args = {
    'learning_rate': 1e-5,
    'max_seq_length': 512,
    'doc_stride': 512,
    'overwrite_output_dir': True,
    'reprocess_input_data': False,
    'train_batch_size': 8,
    'num_train_epochs': 2,
    'gradient_accumulation_steps': 2,
    'no_cache': True,
    'use_cached_eval_features': False,
    'save_model_every_epoch': False,
    'output_dir': "bart-squadv2",
    'eval_batch_size': 32,
    'fp16_opt_level': 'O2',
}
```
## Results
```{"correct": 6961, "similar": 4359, "incorrect": 553, "eval_loss": -12.177856394381962}```
## Model in Action 🚀
```python3
from transformers import XLMRobertaTokenizer, XLMRobertaForQuestionAnswering
import torch
tokenizer = XLMRobertaTokenizer.from_pretrained('a-ware/xlmroberta-squadv2')
model = XLMRobertaForQuestionAnswering.from_pretrained('a-ware/xlmroberta-squadv2')
question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
encoding = tokenizer(question, text, return_tensors='pt')
input_ids = encoding['input_ids']
attention_mask = encoding['attention_mask']
start_scores, end_scores = model(input_ids, attention_mask=attention_mask, output_attentions=False)[:2]
all_tokens = tokenizer.convert_ids_to_tokens(input_ids[0])
answer = ' '.join(all_tokens[torch.argmax(start_scores) : torch.argmax(end_scores)+1])
answer = tokenizer.convert_tokens_to_ids(answer.split())
answer = tokenizer.decode(answer)
#answer => 'a nice puppet'
```
> Created with ❤️ by A-ware UG [](https://github.com/aware-ai)
|
a1noack/bart-large-gigaword | 2021-04-20T01:23:25.000Z | [
"pytorch",
"bart",
"transformers",
"summarization",
"license:mit"
]
| summarization | [
".gitattributes",
"README.md",
"config.json",
"merges.txt",
"pytorch_model.bin",
"vocab.json"
]
| a1noack | 279 | transformers | ---
tags:
- summarization
license: mit
thumbnail: https://en.wikipedia.org/wiki/Bart_Simpson#/media/File:Bart_Simpson_200px.png
---
# BART for Gigaword
- This model was created by fine-tuning the `facebook/bart-large-cnn` weights (also on HuggingFace) for the Gigaword dataset. The model was fine-tuned on the Gigaword training set for 3 epochs, and the model with the highest ROUGE-1 score on the training set batches was kept.
- The BART Tokenizer for CNN-Dailymail was used in the fine-tuning process and that is the tokenizer that will be loaded automatically when doing:
```
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("a1noack/bart-large-gigaword")
```
# Summary generation
- This model achieves ROUGE-1 / ROUGE-2 / ROUGE-L of 37.28 / 18.58 / 34.53 on the Gigaword test set; this is pretty good when compared to PEGASUS, `google/pegasus-gigaword`, which achieves 39.12 / 19.86 / 36.24.
- To achieve these results, generate text using the code below. `text_list` is a list of input text strings.
```
from transformers import AutoModelForSeq2SeqLM  # model class assumed; any seq2seq BART class with .generate() works

model = AutoModelForSeq2SeqLM.from_pretrained("a1noack/bart-large-gigaword")
input_ids_list = tokenizer(text_list, truncation=True, max_length=128,
                           return_tensors='pt', padding=True)['input_ids']
output_ids_list = model.generate(input_ids_list, min_length=0)
outputs_list = tokenizer.batch_decode(output_ids_list, skip_special_tokens=True,
                                      clean_up_tokenization_spaces=False)
``` |
aRchMaGe/whatever | 2021-02-22T14:37:27.000Z | []
| [
".gitattributes"
]
| aRchMaGe | 0 | |||
aaaa/aaaa | 2021-01-22T19:24:44.000Z | []
| [
".gitattributes"
]
| aaaa | 0 | |||
aadelucia/GPT2_medium_narrative_finetuned_large | 2021-05-21T11:44:03.000Z | [
"pytorch",
"jax",
"gpt2",
"lm-head",
"causal-lm",
"transformers",
"text-generation"
]
| text-generation | [
".gitattributes",
"added_tokens.json",
"config.json",
"eval_results.txt",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.json"
]
| aadelucia | 11 | transformers | |
aadelucia/GPT2_medium_narrative_finetuned_medium | 2021-05-21T11:48:25.000Z | [
"pytorch",
"jax",
"gpt2",
"lm-head",
"causal-lm",
"transformers",
"text-generation"
]
| text-generation | [
".gitattributes",
"added_tokens.json",
"config.json",
"eval_results.txt",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.json"
]
| aadelucia | 10 | transformers | |
aakash123/ejej | 2021-02-24T16:29:37.000Z | []
| [
".gitattributes",
"README.md"
]
| aakash123 | 0 | |||
aakashD/t5_paraphrase | 2020-07-26T15:52:56.000Z | [
"pytorch",
"t5",
"seq2seq",
"transformers",
"text2text-generation"
]
| text2text-generation | [
".gitattributes",
"config.json",
"pytorch_model.bin"
]
| aakashD | 19 | transformers | |
aapot/wav2vec2-large-xlsr-53-finnish | 2021-04-27T06:08:06.000Z | [
"pytorch",
"wav2vec2",
"fi",
"dataset:common_voice",
"transformers",
"audio",
"automatic-speech-recognition",
"speech",
"xlsr-fine-tuning-week",
"license:apache-2.0"
]
| automatic-speech-recognition | [
".gitattributes",
"README.md",
"config.json",
"preprocessor_config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.json"
]
| aapot | 453 | transformers | ---
language: fi
datasets:
- common_voice
metrics:
- wer
tags:
- audio
- automatic-speech-recognition
- speech
- xlsr-fine-tuning-week
license: apache-2.0
model-index:
- name: XLSR Wav2Vec2 Finnish by Aapo Tanskanen
results:
- task:
name: Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice fi
type: common_voice
args: fi
metrics:
- name: Test WER
type: wer
value: 32.378771
---
# Wav2Vec2-Large-XLSR-53-Finnish
Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Finnish using the [Common Voice](https://huggingface.co/datasets/common_voice), [CSS10 Finnish](https://www.kaggle.com/bryanpark/finnish-single-speaker-speech-dataset) and [Finnish parliament session 2](https://b2share.eudat.eu/records/4df422d631544ce682d6af1d4714b2d4) datasets.
When using this model, make sure that your speech input is sampled at 16kHz.
## Usage
The model can be used directly (without a language model) as follows:
```python
import librosa
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

test_dataset = load_dataset("common_voice", "fi", split="test[:2%]")
processor = Wav2Vec2Processor.from_pretrained("aapot/wav2vec2-large-xlsr-53-finnish")
model = Wav2Vec2ForCTC.from_pretrained("aapot/wav2vec2-large-xlsr-53-finnish")
resampler = lambda sr, y: librosa.resample(y.numpy().squeeze(), sr, 16_000)

# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(sampling_rate, speech_array).squeeze()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)

with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits

predicted_ids = torch.argmax(logits, dim=-1)
print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
```
## Evaluation
The model can be evaluated as follows on the Finnish test data of Common Voice.
```python
import librosa
import torch
import torchaudio
from datasets import load_dataset, load_metric
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re

test_dataset = load_dataset("common_voice", "fi", split="test")
wer = load_metric("wer")
processor = Wav2Vec2Processor.from_pretrained("aapot/wav2vec2-large-xlsr-53-finnish")
model = Wav2Vec2ForCTC.from_pretrained("aapot/wav2vec2-large-xlsr-53-finnish")
model.to("cuda")

chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\%\‘\”\�\'\...\…\–\é]'
resampler = lambda sr, y: librosa.resample(y.numpy().squeeze(), sr, 16_000)

# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower()
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    batch["speech"] = resampler(sampling_rate, speech_array).squeeze()
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)

# Run inference batch by batch and collect the predicted strings
def evaluate(batch):
    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
    pred_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(pred_ids)
    return batch

result = test_dataset.map(evaluate, batched=True, batch_size=8)
print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```
**Test Result**: 32.378771 %
## Training
The Common Voice `train`, `validation` and `other` datasets were used for training, as well as the `CSS10 Finnish` and `Finnish parliament session 2` datasets.
The script used for training can be found from [Google Colab](https://colab.research.google.com/drive/1vnEGC9BnNRmVyIHj-0UsVulh_cUYSGWA?usp=sharing) |
abbas/gpt2-horror-stories | 2021-05-21T11:50:54.000Z | [
"pytorch",
"jax",
"gpt2",
"lm-head",
"causal-lm",
"transformers",
"text-generation"
]
| text-generation | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"train_results.txt",
"training_args.bin",
"vocab.json"
]
| abbas | 104 | transformers | |
abdinoor/bert-base-uncased | 2020-11-30T18:42:17.000Z | []
| [
".gitattributes"
]
| abdinoor | 0 | |||
abdulbaseer/will_lliw_gpt2 | 2021-05-21T03:10:49.000Z | []
| [
".gitattributes"
]
| abdulbaseer | 0 | |||
abelsaug/albert-xxl_test | 2021-02-08T22:04:35.000Z | []
| [
".gitattributes"
]
| abelsaug | 0 | |||
abhi1nandy2/Bible-roberta-base | 2021-05-20T12:39:19.000Z | [
"pytorch",
"jax",
"roberta",
"masked-lm",
"en",
"transformers",
"English",
"Bible",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"README.md",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.json"
]
| abhi1nandy2 | 9 | transformers | ---
language: "en"
tags:
- English
- Bible
dataset:
- English Bible Translation Dataset
- Link: https://www.kaggle.com/oswinrh/bible
inference: false
---
Dataset - English Bible Translation Dataset (https://www.kaggle.com/oswinrh/bible)
*NOTE:* It is `roberta-base` fine-tuned with the MLM objective for 1 epoch on the 7 `.csv` files mentioned above, which consist of around 5.5M words. |
abhi1nandy2/Craft-bionlp-roberta-base | 2021-05-20T12:40:32.000Z | [
"pytorch",
"jax",
"roberta",
"masked-lm",
"transformers",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.json"
]
| abhi1nandy2 | 11 | transformers | |
abhi1nandy2/EManuals_roberta | 2021-05-20T12:42:54.000Z | [
"pytorch",
"jax",
"roberta",
"masked-lm",
"transformers",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.json"
]
| abhi1nandy2 | 15 | transformers | |
abhi1nandy2/Europarl-roberta-base | 2021-05-20T12:44:00.000Z | [
"pytorch",
"jax",
"roberta",
"masked-lm",
"transformers",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.json"
]
| abhi1nandy2 | 12 | transformers | |
abhiii/qna | 2021-05-05T13:49:57.000Z | []
| [
".gitattributes"
]
| abhiii | 0 | |||
abhijithneilabraham/longformer_covid_qa | 2021-05-13T19:09:22.000Z | [
"pytorch",
"longformer",
"question-answering",
"dataset:covid_qa_deepset",
"transformers"
]
| question-answering | [
".gitattributes",
"README.md",
"config.json",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.json"
]
| abhijithneilabraham | 188 | transformers | ---
datasets:
- covid_qa_deepset
---
# Dataset
Covid 19 question answering data obtained from [covid_qa_deepset](https://huggingface.co/datasets/covid_qa_deepset).
# Original Repository
Repository for the fine tuning, inference and evaluation scripts can be found [here](https://github.com/abhijithneilabraham/Covid-QA).
# Model in action
```
import torch
from transformers import AutoTokenizer, AutoModelForQuestionAnswering
tokenizer = AutoTokenizer.from_pretrained("abhijithneilabraham/longformer_covid_qa")
model = AutoModelForQuestionAnswering.from_pretrained("abhijithneilabraham/longformer_covid_qa")
question = "In this way, what do the mRNA-destabilising RBPs constitute ?"
text = """
In this way, mRNA-destabilising RBPs constitute a 'brake' on the immune system, which may ultimately be toggled therapeutically. I anticipate continued efforts in this area will lead to new methods of regaining control over inflammation in autoimmunity, selectively enhancing immunity in immunotherapy, and modulating RNA synthesis and virus replication during infection.
Another mRNA under post-transcriptional regulation by Regnase-1 and Roquin is Furin, which encodes a conserved proprotein convertase crucial in human health and disease. Furin, along with other PCSK family members, is widely implicated in immune regulation, cancer and the entry, maturation or release of a broad array of evolutionarily diverse viruses including human papillomavirus (HPV), influenza (IAV), Ebola (EboV), dengue (DenV) and human immunodeficiency virus (HIV). Here, Braun and Sauter review the roles of furin in these processes, as well as the history and future of furin-targeting therapeutics. 7 They also discuss their recent work revealing how two IFN-cinducible factors exhibit broad-spectrum inhibition of IAV, measles (MV), zika (ZikV) and HIV by suppressing furin activity. 8 Over the coming decade, I expect to see an ever-finer spatiotemporal resolution of host-oriented therapies to achieve safe, effective and broad-spectrum yet costeffective therapies for clinical use.
The increasing abundance of affordable, sensitive, high-throughput genome sequencing technologies has led to a recent boom in metagenomics and the cataloguing of the microbiome of our world. The MinION nanopore sequencer is one of the latest innovations in this space, enabling direct sequencing in a miniature form factor with only minimal sample preparation and a consumer-grade laptop computer. Nakagawa and colleagues here report on their latest experiments using this system, further improving its performance for use in resource-poor contexts for meningitis diagnoses. 9 While direct sequencing of viral genomic RNA is challenging, this system was recently used to directly sequence an RNA virus genome (IAV) for the first time. 10 I anticipate further improvements in the performance of such devices over the coming decade will transform virus surveillance efforts, the importance of which was underscored by the recent EboV and novel coronavirus (nCoV / COVID-19) outbreaks, enabling rapid deployment of antiviral treatments that take resistance-conferring mutations into account.
Decades of basic immunology research have provided a near-complete picture of the main armaments in the human antiviral arsenal. Nevertheless, this focus on mammalian defences and pathologies has sidelined examination of the types and roles of viruses and antiviral defences that exist throughout our biosphere. One case in point is the CRISPR/Cas antiviral immune system of prokaryotes, which is now repurposed as a revolutionary gene-editing biotechnology in plants and animals. 11 Another is the ancient lineage of nucleocytosolic large DNA viruses (NCLDVs), which are emerging human pathogens that possess enormous genomes of up to several megabases in size encoding hundreds of proteins with unique and unknown functions. 12 Moreover, hundreds of human-and avian-infective viruses such as IAV strain H5N1 are known, but recent efforts indicate the true number may be in the millions and many harbour zoonotic potential. 13 It is increasingly clear that host-virus interactions have generated truly vast yet poorly understood and untapped biodiversity. Closing this Special Feature, Watanabe and Kawaoka elaborate on neo-virology, an emerging field engaged in cataloguing and characterising this biodiversity through a global consortium. 14 I predict these efforts will unlock a vast wealth of currently unexplored biodiversity, leading to biotechnologies and treatments that leverage the host-virus interactions developed throughout evolution.
When biomedical innovations fall into the 'Valley of Death', patients who are therefore not reached all too often fall with them. Being entrusted with the resources and expectation to conceive, deliver and communicate dividends to society is both cherished and eagerly pursued at every stage of our careers. Nevertheless, the road to research translation is winding and is built on a foundation of basic research. Supporting industry-academia collaboration and nurturing talent and skills in the Indo-Pacific region are two of the four pillars of the National Innovation and Science Agenda. 2 These frame Australia's Medical Research and Innovation Priorities, which include antimicrobial resistance, global health and health security, drug repurposing and translational research infrastructure, 15 capturing many of the key elements of this CTI Special Feature. Establishing durable international relationships that integrate diverse expertise is essential to delivering these outcomes. To this end, NHMRC has recently taken steps under the International Engagement Strategy 16 to increase cooperation with its counterparts overseas. These include the Japan Agency for Medical Research and Development (AMED), tasked with translating the biomedical research output of that country. Given the reciprocal efforts at accelerating bilateral engagement currently underway, 17 the prospects for new areas of international cooperation and mobility have never been more exciting nor urgent. With the above in mind, all contributions to this CTI Special Feature I have selected from research presented by fellow invitees to the 2018 Awaji International Forum on Infection and Immunity (AIFII) and 2017 Consortium of Biological Sciences (ConBio) conferences in Japan. Both Australia and Japan have strong traditions in immunology and related disciplines, and I predict that the quantity, quality and importance of our bilateral cooperation will accelerate rapidly over the short to medium term. By expanding and cooperatively leveraging our respective research strengths, our efforts may yet solve the many pressing disease, cost and other sustainability issues of our time.
"""
encoding = tokenizer(question, text, return_tensors="pt")
input_ids = encoding["input_ids"]
# default is local attention everywhere
# the forward method will automatically set global attention on question tokens
attention_mask = encoding["attention_mask"]
start_scores, end_scores = model(input_ids, attention_mask=attention_mask)
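# note: on transformers v4+ the model returns a QuestionAnsweringModelOutput by default;
# in that case use `outputs = model(input_ids, attention_mask=attention_mask)` and read
# `outputs.start_logits` / `outputs.end_logits` instead of tuple-unpacking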
all_tokens = tokenizer.convert_ids_to_tokens(input_ids[0].tolist())
answer_tokens = all_tokens[torch.argmax(start_scores) :torch.argmax(end_scores)+1]
answer = tokenizer.decode(tokenizer.convert_tokens_to_ids(answer_tokens))
# output => a 'brake' on the immune system
``` |
abhilash1910/albert-german-ner | 2021-03-07T13:52:26.000Z | [
"tf",
"albert",
"masked-lm",
"transformers",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"README.md",
"Readme.md",
"config.json",
"special_tokens_map.json",
"spiece.model",
"test_predictions.txt",
"test_results.txt",
"tf_model.h5",
"tokenizer_config.json"
]
| abhilash1910 | 23 | transformers | ## German NER Albert Model
This is an ALBERT model trained for token classification (NER) in German on [GermEval 2014](https://sites.google.com/site/germeval2014ner/) and can be used for inference.
## Model Specifications
- MAX_LENGTH=128
- MODEL='albert-base-v1'
- BATCH_SIZE=32
- NUM_EPOCHS=3
- SAVE_STEPS=750
- SEED=1
- SAVE_STEPS = 100
- LOGGING_STEPS = 100
- SEED = 42
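The training script itself is not shown in the card (and the model was trained with TensorFlow); purely as an illustration, the hyperparameters above map onto the PyTorch `Trainer` API roughly as follows, where `tokenized_germeval` and `label_list` are hypothetical placeholders for a GermEval 2014 dataset tokenized to MAX_LENGTH=128 with aligned NER labels (the second SAVE_STEPS/SEED values are used):
```python
from transformers import (AutoModelForTokenClassification, AutoTokenizer,
                          Trainer, TrainingArguments)

# hypothetical placeholders: a tokenized GermEval 2014 dataset and its label list
tokenized_germeval = ...
label_list = ...

tokenizer = AutoTokenizer.from_pretrained("albert-base-v1")   # MODEL
model = AutoModelForTokenClassification.from_pretrained(
    "albert-base-v1", num_labels=len(label_list)
)

args = TrainingArguments(
    output_dir="albert-german-ner",
    per_device_train_batch_size=32,  # BATCH_SIZE
    num_train_epochs=3,              # NUM_EPOCHS
    save_steps=100,                  # SAVE_STEPS
    logging_steps=100,               # LOGGING_STEPS
    seed=42,                         # SEED
)

trainer = Trainer(model=model, args=args, train_dataset=tokenized_germeval, tokenizer=tokenizer)
trainer.train()
```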
### Usage Specifications
This model is trained with TensorFlow and is compatible with the Hugging Face 'ner' pipeline.
```python
from transformers import AutoTokenizer,TFAutoModelForTokenClassification
from transformers import pipeline
model=TFAutoModelForTokenClassification.from_pretrained('abhilash1910/albert-german-ner')
tokenizer=AutoTokenizer.from_pretrained('abhilash1910/albert-german-ner')
ner_model = pipeline('ner', model=model, tokenizer=tokenizer)
seq='Berlin ist die Hauptstadt von Deutschland'
ner_model(seq)
```
The TensorFlow version of ALBERT is used for training the model, and the output for the above-mentioned segment is as follows:
```
[{'entity': 'B-PERderiv',
'index': 1,
'score': 0.09580112248659134,
'word': '▁berlin'},
{'entity': 'B-ORGpart',
'index': 2,
'score': 0.08364498615264893,
'word': '▁is'},
{'entity': 'B-LOCderiv',
'index': 3,
'score': 0.07593920826911926,
'word': 't'},
{'entity': 'B-PERderiv',
'index': 4,
'score': 0.09574996680021286,
'word': '▁die'},
{'entity': 'B-LOCderiv',
'index': 5,
'score': 0.07097965478897095,
'word': '▁'},
{'entity': 'B-PERderiv',
'index': 6,
'score': 0.07122448086738586,
'word': 'haupt'},
{'entity': 'B-PERderiv',
'index': 7,
'score': 0.12397754937410355,
'word': 'stadt'},
{'entity': 'I-OTHderiv',
'index': 8,
'score': 0.0818650871515274,
'word': '▁von'},
{'entity': 'I-LOCderiv',
'index': 9,
'score': 0.08271490037441254,
'word': '▁'},
{'entity': 'B-LOCderiv',
'index': 10,
'score': 0.08616268634796143,
'word': 'deutschland'}]
```
## Resources
For all resources, please look into [huggingface](https://huggingface.com).
|
abhilash1910/distilbert-squadv1 | 2021-03-09T11:36:17.000Z | [
"pytorch",
"distilbert",
"question-answering",
"transformers"
]
| question-answering | [
".gitattributes",
"README.md",
"config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"trainer_state.json",
"training_args.bin",
"vocab.txt"
]
| abhilash1910 | 8 | transformers | # DistilBERT--SQuAD-v1
Training is done on the [SQuAD](https://huggingface.co/datasets/squad) dataset. The model can be accessed via [HuggingFace](https://huggingface.co/abhilash1910/distilbert-squadv1):
## Model Specifications
We have used the following parameters:
- Training Batch Size : 512
- Learning Rate : 3e-5
- Training Epochs : 0.75
- Sequence Length : 384
- Stride : 128
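As a clarification of the 'Sequence Length' and 'Stride' settings above, this is how they are typically applied when tokenizing SQuAD examples with a sliding window (an illustration, not necessarily the exact preprocessing script used for this model):
```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")

question = "What is the fund price of Huggingface in NYSE?"
context = "Huggingface Co. has a total fund price of $19.6 million dollars"

# long contexts are split into overlapping windows of 384 tokens with a 128-token overlap
encoded = tokenizer(
    question,
    context,
    max_length=384,                   # Sequence Length
    stride=128,                       # Stride
    truncation="only_second",         # truncate only the context, never the question
    return_overflowing_tokens=True,   # emit one feature per window
    return_offsets_mapping=True,      # needed to map predictions back to character spans
    padding="max_length",
)
print(len(encoded["input_ids"]))  # number of windows produced for this example
```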
## Usage Specifications
```python
from transformers import AutoModelForQuestionAnswering,AutoTokenizer,pipeline
model=AutoModelForQuestionAnswering.from_pretrained('abhilash1910/distilbert-squadv1')
tokenizer=AutoTokenizer.from_pretrained('abhilash1910/distilbert-squadv1')
nlp_QA=pipeline('question-answering',model=model,tokenizer=tokenizer)
QA_inp={
'question': 'What is the fund price of Huggingface in NYSE?',
'context': 'Huggingface Co. has a total fund price of $19.6 million dollars'
}
result=nlp_QA(QA_inp)
result
```
The result is:
```bash
{'score': 0.38547369837760925,
'start': 42,
'end': 55,
'answer': '$19.6 million'}
```
|
abhilash1910/financial_roberta | 2021-05-20T12:45:02.000Z | [
"pytorch",
"tf",
"jax",
"roberta",
"masked-lm",
"arxiv:1907.11692",
"transformers",
"finance",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"README.md",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tf_model.h5",
"tokenizer_config.json",
"training_args.bin",
"vocab.json"
]
| abhilash1910 | 564 | transformers | ---
tags:
- finance
---
# Roberta Masked Language Model Trained On Financial Phrasebank Corpus
This is a Masked Language Model trained with [Roberta](https://huggingface.co/transformers/model_doc/roberta.html) on a Financial Phrasebank Corpus.
The model is built using Huggingface transformers.
The model can be found at :[Financial_Roberta](https://huggingface.co/abhilash1910/financial_roberta)
## Specifications
The corpus for training is taken from the [Financial Phrasebank (Malo et al.)](https://www.researchgate.net/publication/251231107_Good_Debt_or_Bad_Debt_Detecting_Semantic_Orientations_in_Economic_Texts).
## Model Specification
The model chosen for training is [Roberta](https://arxiv.org/abs/1907.11692) with the following specifications:
1. vocab_size=56000
2. max_position_embeddings=514
3. num_attention_heads=12
4. num_hidden_layers=6
5. type_vocab_size=1
This is trained using RobertaConfig from the transformers package.
The model is trained for 10 epochs with a GPU batch size of 64.
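A minimal sketch of the configuration step described above, assuming the standard `RobertaForMaskedLM` masked-language-modelling setup (the data pipeline and the 10-epoch / batch-size-64 training loop are omitted):
```python
from transformers import RobertaConfig, RobertaForMaskedLM

config = RobertaConfig(
    vocab_size=56000,
    max_position_embeddings=514,
    num_attention_heads=12,
    num_hidden_layers=6,
    type_vocab_size=1,
)
model = RobertaForMaskedLM(config=config)
print(f"Parameters: {model.num_parameters():,}")
```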
## Usage Specifications
To use this model, first import the AutoTokenizer and AutoModelWithLMHead modules from transformers.
Then specify the pre-trained model, which in this case is 'abhilash1910/financial_roberta', for both the tokenizer and the model.
```python
from transformers import AutoTokenizer, AutoModelWithLMHead
tokenizer = AutoTokenizer.from_pretrained("abhilash1910/financial_roberta")
model = AutoModelWithLMHead.from_pretrained("abhilash1910/financial_roberta")
```
After this the model will be downloaded; it may take some time to download all the model files.
For testing the model, import the pipeline module from transformers and create a fill-mask pipeline for inference as follows:
```python
from transformers import pipeline
model_mask = pipeline('fill-mask', model='abhilash1910/financial_roberta')
model_mask("The company had a <mask> of 20% in 2020.")
```
Some of the examples are also provided with generic financial statements:
Example 1:
```python
model_mask("The company had a <mask> of 20% in 2020.")
```
Output:
```bash
[{'sequence': '<s>The company had a profit of 20% in 2020.</s>',
'score': 0.023112965747714043,
'token': 421,
'token_str': 'Ġprofit'},
{'sequence': '<s>The company had a loss of 20% in 2020.</s>',
'score': 0.021379893645644188,
'token': 616,
'token_str': 'Ġloss'},
{'sequence': '<s>The company had a year of 20% in 2020.</s>',
'score': 0.0185744296759367,
'token': 443,
'token_str': 'Ġyear'},
{'sequence': '<s>The company had a sales of 20% in 2020.</s>',
'score': 0.018143286928534508,
'token': 428,
'token_str': 'Ġsales'},
{'sequence': '<s>The company had a value of 20% in 2020.</s>',
'score': 0.015319528989493847,
'token': 776,
'token_str': 'Ġvalue'}]
```
Example 2:
```python
model_mask("The <mask> is listed under NYSE")
```
Output:
```bash
[{'sequence': '<s>The company is listed under NYSE</s>',
'score': 0.1566661298274994,
'token': 359,
'token_str': 'Ġcompany'},
{'sequence': '<s>The total is listed under NYSE</s>',
'score': 0.05542507395148277,
'token': 522,
'token_str': 'Ġtotal'},
{'sequence': '<s>The value is listed under NYSE</s>',
'score': 0.04729423299431801,
'token': 776,
'token_str': 'Ġvalue'},
{'sequence': '<s>The order is listed under NYSE</s>',
'score': 0.02533523552119732,
'token': 798,
'token_str': 'Ġorder'},
{'sequence': '<s>The contract is listed under NYSE</s>',
'score': 0.02087237872183323,
'token': 635,
'token_str': 'Ġcontract'}]
```
## Resources
For all resources, please look into the [HuggingFace](https://huggingface.co/) site and the [repositories](https://github.com/huggingface).
|
abhilash1910/french-roberta | 2021-05-20T12:45:47.000Z | [
"pytorch",
"jax",
"roberta",
"masked-lm",
"arxiv:1907.11692",
"transformers",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"README.md",
"config.json",
"flax_model.msgpack",
"log_history.json",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.json"
]
| abhilash1910 | 49 | transformers | # Roberta Masked Language Model Trained On French News Corpus :robot:
This is a Masked Language Model trained with [Roberta](https://huggingface.co/transformers/model_doc/roberta.html) on a small French News Corpus(Leipzig corpora).
The model is built using Huggingface transformers.
The model can be found at :[French-Roberta](https://huggingface.co/abhilash1910/french-roberta)
## Specifications
The corpus for training is taken from the Leipzig Corpora (French News), and the model is trained on a small subset of the corpus (300K sentences).
## Model Specification
The model chosen for training is [Roberta](https://arxiv.org/abs/1907.11692) with the following specifications:
1. vocab_size=32000
2. max_position_embeddings=514
3. num_attention_heads=12
4. num_hidden_layers=6
5. type_vocab_size=1
This is trained using RobertaConfig from the transformers package. The total number of training parameters is 68,124,416.
The model is trained for 100 epochs with a GPU batch size of 64.
More details for building custom models can be found at the [HuggingFace Blog](https://huggingface.co/blog/how-to-train)
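As a sketch of the first step of that recipe (an illustration, not the exact script used here), a byte-level BPE tokenizer with the vocab_size listed above can be trained on the raw corpus; the corpus filename below is a hypothetical placeholder for the Leipzig French News sentences file:
```python
from tokenizers import ByteLevelBPETokenizer

# hypothetical path: the Leipzig French News corpus, one sentence per line
corpus_file = "fra_news_300K-sentences.txt"

tokenizer = ByteLevelBPETokenizer()
tokenizer.train(
    files=[corpus_file],
    vocab_size=32000,
    min_frequency=2,  # assumption taken from the how-to-train recipe
    special_tokens=["<s>", "<pad>", "</s>", "<unk>", "<mask>"],
)
tokenizer.save_model(".")  # writes vocab.json and merges.txt
```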
## Usage Specifications
To use this model, first import the AutoTokenizer and AutoModelWithLMHead modules from transformers.
Then specify the pre-trained model, which in this case is 'abhilash1910/french-roberta', for both the tokenizer and the model.
```python
from transformers import AutoTokenizer, AutoModelWithLMHead
tokenizer = AutoTokenizer.from_pretrained("abhilash1910/french-roberta")
model = AutoModelWithLMHead.from_pretrained("abhilash1910/french-roberta")
```
After this the model will be downloaded; it may take some time to download all the model files.
For testing the model, import the pipeline module from transformers and create a fill-mask pipeline for inference as follows:
```python
from transformers import pipeline
model_mask = pipeline('fill-mask', model='abhilash1910/french-roberta')
model_mask("Le tweet <mask>.")
```
Some of the examples are also provided with generic French sentences:
Example 1:
```python
model_mask("À ce jour, <mask> projet a entraîné")
```
Output:
```bash
[{'sequence': '<s>À ce jour, belles projet a entraîné</s>',
'score': 0.18685665726661682,
'token': 6504,
'token_str': 'Ġbelles'},
{'sequence': '<s>À ce jour,- projet a entraîné</s>',
'score': 0.0005200508167035878,
'token': 17,
'token_str': '-'},
{'sequence': '<s>À ce jour, de projet a entraîné</s>',
'score': 0.00045729897101409733,
'token': 268,
'token_str': 'Ġde'},
{'sequence': '<s>À ce jour, du projet a entraîné</s>',
'score': 0.0004307595663703978,
'token': 326,
'token_str': 'Ġdu'},
{'sequence': '<s>À ce jour," projet a entraîné</s>',
'score': 0.0004219160182401538,
'token': 6,
'token_str': '"'}]
```
Example 2:
```python
model_mask("C'est un <mask>")
```
Output:
```bash
[{'sequence': "<s>C'est un belles</s>",
'score': 0.16440927982330322,
'token': 6504,
'token_str': 'Ġbelles'},
{'sequence': "<s>C'est un de</s>",
'score': 0.0005495127406902611,
'token': 268,
'token_str': 'Ġde'},
{'sequence': "<s>C'est un du</s>",
'score': 0.00044988933950662613,
'token': 326,
'token_str': 'Ġdu'},
{'sequence': "<s>C'est un-</s>",
'score': 0.00044542422983795404,
'token': 17,
'token_str': '-'},
{'sequence': "<s>C'est un\t</s>",
'score': 0.00037563967634923756,
'token': 202,
'token_str': 'ĉ'}]
```
## Resources
For all resources, please look into the [HuggingFace](https://huggingface.co/) site and the [repositories](https://github.com/huggingface).
|
abhiramtirumala/DialoGPT-sarcastic-medium | 2021-05-27T21:33:38.000Z | [
"pytorch",
"gpt2",
"lm-head",
"causal-lm",
"transformers",
"text-generation"
]
| text-generation | [
".gitattributes",
"config.json",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer.json",
"tokenizer_config.json",
"vocab.json"
]
| abhiramtirumala | 13 | transformers | |
abhiramtirumala/DialoGPT-sarcastic | 2021-05-22T00:52:20.000Z | [
"pytorch",
"gpt2",
"lm-head",
"causal-lm",
"transformers",
"conversational",
"pipeline_tag:conversational",
"text-generation"
]
| conversational | [
".gitattributes",
"README.md",
"config.json",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer.json",
"tokenizer_config.json",
"vocab.json"
]
| abhiramtirumala | 566 | transformers |
---
pipeline_tag: conversational
---
This model is a fine-tuned version of Microsoft/DialoGPT-small, trained to create sarcastic responses.
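The card does not include a usage example; a minimal generation sketch, assuming the standard DialoGPT single-turn chat pattern (the user message and sampling settings below are illustrative), might look like this:
```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("abhiramtirumala/DialoGPT-sarcastic")
model = AutoModelForCausalLM.from_pretrained("abhiramtirumala/DialoGPT-sarcastic")

# encode the user's message, appending the end-of-sequence token
user_input = "How are you today?"
input_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors="pt")

# sample a (hopefully sarcastic) reply and decode only the newly generated tokens
reply_ids = model.generate(
    input_ids,
    max_length=100,
    pad_token_id=tokenizer.eos_token_id,
    do_sample=True,
    top_k=50,
    top_p=0.95,
)
print(tokenizer.decode(reply_ids[0, input_ids.shape[-1]:], skip_special_tokens=True))
```
 |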
abhishek/autonlp-hindi-asr | 2021-04-09T12:31:26.000Z | [
"pytorch",
"wav2vec2",
"transformers",
"autonlp",
"automatic-speech-recognition",
"audio"
]
| automatic-speech-recognition | [
".gitattributes",
"README.md",
"config.json",
"preprocessor_config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.json"
]
| abhishek | 219 | transformers | ---
tags:
- autonlp
- automatic-speech-recognition
- audio
language: {language}
---
# Model Trained Using AutoNLP
- Problem type: Speech Recognition
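The card is auto-generated and does not include a usage example. A minimal inference sketch, assuming this is a fine-tuned Wav2Vec2 CTC model (as the repository files suggest) and using a hypothetical 16 kHz mono recording of Hindi speech:
```python
import soundfile as sf
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("abhishek/autonlp-hindi-asr")
model = Wav2Vec2ForCTC.from_pretrained("abhishek/autonlp-hindi-asr")

# hypothetical input: a 16 kHz mono WAV file containing Hindi speech
speech, sampling_rate = sf.read("hindi_sample.wav")

inputs = processor(speech, sampling_rate=sampling_rate, return_tensors="pt", padding=True)
with torch.no_grad():
    logits = model(inputs.input_values).logits

predicted_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(predicted_ids))
```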
|
abhishek/autonlp-imdb_eval-71421 | 2021-05-18T22:54:10.000Z | [
"pytorch",
"jax",
"bert",
"text-classification",
"en",
"dataset:abhishek/autonlp-data-imdb_eval",
"transformers",
"autonlp"
]
| text-classification | [
".gitattributes",
"README.md",
"config.json",
"flax_model.msgpack",
"pytorch_model.bin",
"sample_input.pkl",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.txt"
]
| abhishek | 17 | transformers | ---
tags: autonlp
language: en
widget:
- text: "I love AutoNLP 🤗"
datasets:
- abhishek/autonlp-data-imdb_eval
---
# Model Trained Using AutoNLP
- Problem type: Binary Classification
- Model ID: 71421
## Validation Metrics
- Loss: 0.4114699363708496
- Accuracy: 0.8248248248248248
- Precision: 0.8305439330543933
- Recall: 0.8085539714867617
- AUC: 0.9088033420466026
- F1: 0.8194014447884417
## Usage
You can use cURL to access this model:
```
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoNLP"}' https://api-inference.huggingface.co/models/abhishek/autonlp-imdb_eval-71421
```
Or Python API:
```
from transformers import AutoModelForSequenceClassification, AutoTokenizer
model = AutoModelForSequenceClassification.from_pretrained("abhishek/autonlp-imdb_eval-71421", use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained("abhishek/autonlp-imdb_eval-71421", use_auth_token=True)
inputs = tokenizer("I love AutoNLP", return_tensors="pt")
outputs = model(**inputs)
``` |
abhishek/autonlp-imdb_sentiment_classification-31154 | 2021-05-20T12:46:38.000Z | [
"pytorch",
"jax",
"roberta",
"text-classification",
"en",
"transformers",
"autonlp"
]
| text-classification | [
".gitattributes",
"README.md",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"sample_input.pkl",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.json"
]
| abhishek | 106 | transformers | ---
tags: autonlp
language: en
widget:
- text: "I love AutoNLP 🤗"
---
# Model Trained Using AutoNLP
- Problem type: Binary Classification
- Model ID: 31154
## Validation Metrics
- Loss: 0.19292379915714264
- Accuracy: 0.9395
- Precision: 0.9569557080474111
- Recall: 0.9204
- AUC: 0.9851040399999998
- F1: 0.9383219492302988
## Usage
You can use cURL to access this model:
```
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoNLP"}' https://api-inference.huggingface.co/models/abhishek/autonlp-imdb_sentiment_classification-31154
```
Or Python API:
```
from transformers import AutoModelForSequenceClassification, AutoTokenizer
model = AutoModelForSequenceClassification.from_pretrained("abhishek/autonlp-imdb_sentiment_classification-31154", use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained("abhishek/autonlp-imdb_sentiment_classification-31154", use_auth_token=True)
inputs = tokenizer("I love AutoNLP", return_tensors="pt")
outputs = model(**inputs)
``` |
abhishek/autonlp-japanese-sentiment-59362 | 2021-05-18T22:55:03.000Z | [
"pytorch",
"jax",
"bert",
"text-classification",
"ja",
"dataset:abhishek/autonlp-data-japanese-sentiment",
"transformers",
"autonlp"
]
| text-classification | [
".gitattributes",
"README.md",
"config.json",
"flax_model.msgpack",
"pytorch_model.bin",
"sample_input.pkl",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.txt"
]
| abhishek | 18 | transformers | ---
tags: autonlp
language: ja
widget:
- text: "I love AutoNLP 🤗"
datasets:
- abhishek/autonlp-data-japanese-sentiment
---
# Model Trained Using AutoNLP
- Problem type: Binary Classification
- Model ID: 59362
## Validation Metrics
- Loss: 0.13092292845249176
- Accuracy: 0.9527127414314258
- Precision: 0.9634070704982427
- Recall: 0.9842171959602166
- AUC: 0.9667289746092403
- F1: 0.9737009564152002
## Usage
You can use cURL to access this model:
```
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoNLP"}' https://api-inference.huggingface.co/models/abhishek/autonlp-japanese-sentiment-59362
```
Or Python API:
```
from transformers import AutoModelForSequenceClassification, AutoTokenizer
model = AutoModelForSequenceClassification.from_pretrained("abhishek/autonlp-japanese-sentiment-59362", use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained("abhishek/autonlp-japanese-sentiment-59362", use_auth_token=True)
inputs = tokenizer("I love AutoNLP", return_tensors="pt")
outputs = model(**inputs)
``` |
abhishek/autonlp-japanese-sentiment-59363 | 2021-05-18T22:56:15.000Z | [
"pytorch",
"jax",
"bert",
"text-classification",
"ja",
"dataset:abhishek/autonlp-data-japanese-sentiment",
"transformers",
"autonlp"
]
| text-classification | [
".gitattributes",
"README.md",
"config.json",
"flax_model.msgpack",
"pytorch_model.bin",
"sample_input.pkl",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.txt"
]
| abhishek | 1,002 | transformers | ---
tags: autonlp
language: ja
widget:
- text: "🤗AutoNLPが大好きです"
datasets:
- abhishek/autonlp-data-japanese-sentiment
---
# Model Trained Using AutoNLP
- Problem type: Binary Classification
- Model ID: 59363
## Validation Metrics
- Loss: 0.12651239335536957
- Accuracy: 0.9532079853817648
- Precision: 0.9729688278823665
- Recall: 0.9744633462616643
- AUC: 0.9717333684823413
- F1: 0.9737155136027014
## Usage
You can use cURL to access this model:
```
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoNLP"}' https://api-inference.huggingface.co/models/abhishek/autonlp-japanese-sentiment-59363
```
Or Python API:
```
from transformers import AutoModelForSequenceClassification, AutoTokenizer
model = AutoModelForSequenceClassification.from_pretrained("abhishek/autonlp-japanese-sentiment-59363", use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained("abhishek/autonlp-japanese-sentiment-59363", use_auth_token=True)
inputs = tokenizer("I love AutoNLP", return_tensors="pt")
outputs = model(**inputs)
``` |
abjbpi/DS_small | 2021-06-04T11:23:14.000Z | [
"pytorch",
"gpt2",
"lm-head",
"causal-lm",
"transformers",
"conversational",
"text-generation"
]
| conversational | [
".gitattributes",
"README.md",
"config.json",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer.json",
"tokenizer_config.json",
"vocab.json"
]
| abjbpi | 29 | transformers | ---
tags:
- conversational
---
# Model v2 |
abjbpi/Dwight_Schrute | 2021-06-04T11:43:31.000Z | [
"pytorch",
"gpt2",
"lm-head",
"causal-lm",
"transformers",
"conversational",
"text-generation"
]
| conversational | [
".gitattributes",
"README.md",
"config.json",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer.json",
"tokenizer_config.json",
"vocab.json"
]
| abjbpi | 392 | transformers | ---
tags:
- conversational
---
# My Awesome Model |
abryee/TigXLNet | 2021-01-10T14:29:08.000Z | [
"pytorch",
"xlnet",
"arxiv:2006.07698",
"transformers"
]
| [
".gitattributes",
"README.md",
"added_tokens.json",
"config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"spiece.model",
"tokenizer_config.json"
]
| abryee | 43 | transformers | # Transferring Monolingual Model to Low-Resource Language: The Case Of Tigrinya:
## Proposed Method:
<img src="data/proposed.png" height = "330" width ="760" >
The proposed method transfers a monolingual Transformer model to a new target language at the lexical level by learning new token embeddings. All implementations in this repo use XLNet as the source Transformer model; however, other Transformer models can be used similarly.
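As a rough illustration of this lexical-level transfer (a sketch only; the actual training code is in the notebooks listed below), the source model's input embeddings can be swapped for embeddings built over the new Tigrinya vocabulary while the remaining Transformer weights are reused. Here `tigrinya_word2vec.npy` is a hypothetical word2vec-initialised matrix aligned with the new tokenizer's vocabulary:
```python
import numpy as np
import torch
from transformers import XLNetModel

model = XLNetModel.from_pretrained("xlnet-base-cased")

# hypothetical: one word2vec vector per token of the new Tigrinya vocabulary,
# with the same dimensionality as the model (d_model = 768 for xlnet-base-cased)
tigrinya_embeddings = np.load("tigrinya_word2vec.npy")

new_embedding = torch.nn.Embedding.from_pretrained(
    torch.tensor(tigrinya_embeddings, dtype=torch.float32), freeze=False
)
model.set_input_embeddings(new_embedding)          # swap in the new lexical layer
model.config.vocab_size = new_embedding.num_embeddings

# optionally train only the new embeddings and keep the rest of XLNet frozen
for name, param in model.named_parameters():
    param.requires_grad = name.startswith("word_embedding")
```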
## Main files:
All files are IPython Notebook files which can be executed directly in Google Colab.
- train.ipynb : Fine-tunes XLNet (mono-lingual transformer) on new target language (Tigrinya) sentiment analysis dataset. [](https://colab.research.google.com/drive/1bSSrKE-TSphUyrNB2UWhFI-Bkoz0a5l0?usp=sharing)
- test.ipynb : Evaluates the fine-tuned model on test data. [](https://colab.research.google.com/drive/17R1lvRjxILVNk971vzZT79o2OodwaNIX?usp=sharing)
- token_embeddings.ipynb : Trains a word2vec token embeddings for Tigrinya language. [](https://colab.research.google.com/drive/1hCtetAllAjBw28EVQkJFpiKdFtXmuxV7?usp=sharing)
- process_Tigrinya_comments.ipynb : Extracts Tigrinya comments from mixed language contents. [](https://colab.research.google.com/drive/1-ndLlBV-iLZNBW3Z8OfKAqUUCjvGbdZU?usp=sharing)
- extract_YouTube_comments.ipynb : Downloads available comments from a YouTube channel ID. [](https://colab.research.google.com/drive/1b7G85wHKe18y45JIDtvDJdO5dOkRmDdp?usp=sharing)
- auto_labelling.ipynb : Automatically labels Tigrinya comments in to positive or negative sentiments based on [Emoji's sentiment](http://kt.ijs.si/data/Emoji_sentiment_ranking/). [](https://colab.research.google.com/drive/1wnZf7CBBCIr966vRUITlxKCrANsMPpV7?usp=sharing)
## Tigrinya Tokenizer:
A [sentencepiece](https://github.com/google/sentencepiece) based tokenizer for Tigrinya has been released to the public and can be accessed as in the following:
```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("abryee/TigXLNet")
tokenizer.tokenize("ዋዋዋው እዛ ፍሊም ካብተን ዘድንቀን ሓንቲ ኢያ ሞ ብጣዕሚ ኢና ነመስግን ሓንቲ ክብላ ደልየ ዘሎኹ ሓደራኣኹም ኣብ ጊዜኹም ተረክቡ")
```
## TigXLNet:
A new general purpose transformer model for low-resource language Tigrinya is also released to the public and be accessed as in the following:
```python
from transformers import AutoConfig, AutoModel

config = AutoConfig.from_pretrained("abryee/TigXLNet")
config.d_head = 64
model = AutoModel.from_pretrained("abryee/TigXLNet", config=config)
```
## Evaluation:
The proposed method is evaluated using two datasets:
- A newly created sentiment analysis dataset for low-resource language (Tigriyna).
<table>
<tr>
<td> <table>
<thead>
<tr>
<th><sub>Models</sub></th>
<th><sub>Configuration</sub></th>
<th><sub>F1-Score</sub></th>
</tr>
</thead>
<tbody>
<tr>
<td rowspan=3><sub>BERT</sub></td>
<td rowspan=1><sub>+Frozen BERT weights</sub></td>
<td><sub>54.91</sub></td>
</tr>
<tr>
<td rowspan=1><sub>+Random embeddings</sub></td>
<td><sub>74.26</sub></td>
</tr>
<tr>
<td rowspan=1><sub>+Frozen token embeddings</sub></td>
<td><sub>76.35</sub></td>
</tr>
<tr>
<td rowspan=3><sub>mBERT</sub></td>
<td rowspan=1><sub>+Frozen mBERT weights</sub></td>
<td><sub>57.32</sub></td>
</tr>
<tr>
<td rowspan=1><sub>+Random embeddings</sub></td>
<td><sub>76.01</sub></td>
</tr>
<tr>
<td rowspan=1><sub>+Frozen token embeddings</sub></td>
<td><sub>77.51</sub></td>
</tr>
<tr>
<td rowspan=3><sub>XLNet</sub></td>
<td rowspan=1><sub>+Frozen XLNet weights</sub></td>
<td><strong><sub>68.14</sub></strong></td>
</tr>
<tr>
<td rowspan=1><sub>+Random embeddings</sub></td>
<td><strong><sub>77.83</sub></strong></td>
</tr>
<tr>
<td rowspan=1><sub>+Frozen token embeddings</sub></td>
<td><strong><sub>81.62</sub></strong></td>
</tr>
</tbody>
</table> </td>
<td><img src="data/effect_of_dataset_size.png" alt="3" width = 480px height = 280px></td>
</tr>
</table>
- Cross-lingual Sentiment dataset ([CLS](https://zenodo.org/record/3251672#.Xs65VzozbIU)).
<table>
<thead>
<tr>
<th rowspan=2><sub>Models</sub></th>
<th rowspan=1 colspan=3><sub>English</sub></th>
<th rowspan=1 colspan=3><sub>German</sub></th>
<th rowspan=1 colspan=3><sub>French</sub></th>
<th rowspan=1 colspan=3><sub>Japanese</sub></th>
<th rowspan=2><sub>Average</sub></th>
</tr>
<tr>
<th colspan=1><sub>Books</sub></th>
<th colspan=1><sub>DVD</sub></th>
<th colspan=1><sub>Music</sub></th>
<th colspan=1><sub>Books</sub></th>
<th colspan=1><sub>DVD</sub></th>
<th colspan=1><sub>Music</sub></th>
<th colspan=1><sub>Books</sub></th>
<th colspan=1><sub>DVD</sub></th>
<th colspan=1><sub>Music</sub></th>
<th colspan=1><sub>Books</sub></th>
<th colspan=1><sub>DVD</sub></th>
<th colspan=1><sub>Music</sub></th>
</tr>
</thead>
<tbody>
<tr>
<td colspan=1><sub>XLNet</sub></td>
<td colspan=1><sub><strong>92.90</strong></sub></td>
<td colspan=1><sub><strong>93.31</strong></sub></td>
<td colspan=1><sub><strong>92.02</strong></sub></td>
<td colspan=1><sub>85.23</sub></td>
<td colspan=1><sub>83.30</sub></td>
<td colspan=1><sub>83.89</sub></td>
<td colspan=1><sub>73.05</sub></td>
<td colspan=1><sub>69.80</sub></td>
<td colspan=1><sub>70.12</sub></td>
<td colspan=1><sub>83.20</sub></td>
<td colspan=1><sub><strong>86.07</strong></sub></td>
<td colspan=1><sub>85.24</sub></td>
<td colspan=1><sub>83.08</sub></td>
</tr>
<tr>
<td colspan=1><sub>mBERT</sub></td>
<td colspan=1><sub>92.78</sub></td>
<td colspan=1><sub>90.30</sub></td>
<td colspan=1><sub>91.88</sub></td>
<td colspan=1><sub><strong>88.65</strong></sub></td>
<td colspan=1><sub><strong>85.85</strong></sub></td>
<td colspan=1><sub><strong>90.38</strong></sub></td>
<td colspan=1><sub><strong>91.09</strong></sub></td>
<td colspan=1><sub><strong>88.57</strong></sub></td>
<td colspan=1><sub><strong>93.67</strong></sub></td>
<td colspan=1><sub><strong>84.35</strong></sub></td>
<td colspan=1><sub>81.77</sub></td>
<td colspan=1><sub><strong>87.53</strong></sub></td>
<td colspan=1><sub><strong>88.90</strong></sub></td>
</tr>
</tbody>
</table>
## Dataset used for this paper:
We have constructed new sentiment analysis dataset for Tigrinya language and it can be found in the zip file (Tigrinya Sentiment Analysis Dataset)
## Citing our paper:
Our paper can be accessed from ArXiv [link](https://arxiv.org/pdf/2006.07698.pdf), and please consider citing our work.
```bibtex
@misc{tela2020transferring,
      title={Transferring Monolingual Model to Low-Resource Language: The Case of Tigrinya},
      author={Abrhalei Tela and Abraham Woubie and Ville Hautamaki},
      year={2020},
      eprint={2006.07698},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
```
|
|
absa/basic_reference_recognizer-lapt-0.1 | 2020-10-29T11:39:52.000Z | [
"reference_recognizer",
"transformers"
]
| [
".gitattributes",
"config.json"
]
| absa | 13 | transformers | ||
absa/basic_reference_recognizer-rest-0.1 | 2020-10-29T11:39:42.000Z | [
"reference_recognizer",
"transformers"
]
| [
".gitattributes",
"config.json"
]
| absa | 15 | transformers | ||
absa/bert-lapt-0.1 | 2021-05-19T11:34:12.000Z | [
"tf",
"bert",
"transformers"
]
| [
".gitattributes",
"config.json",
"special_tokens_map.json",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| absa | 16 | transformers | ||
absa/bert-lapt-0.2 | 2021-05-19T11:34:39.000Z | [
"tf",
"bert",
"transformers"
]
| [
".gitattributes",
"config.json",
"special_tokens_map.json",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| absa | 24 | transformers | ||
absa/bert-rest-0.1 | 2021-05-19T11:35:05.000Z | [
"tf",
"bert",
"transformers"
]
| [
".gitattributes",
"config.json",
"special_tokens_map.json",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| absa | 19 | transformers | ||
absa/bert-rest-0.2 | 2021-05-19T11:35:32.000Z | [
"tf",
"bert",
"transformers"
]
| [
".gitattributes",
"config.json",
"special_tokens_map.json",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| absa | 16 | transformers | ||
absa/bert-rest-lapt-0.1 | 2021-05-19T11:35:58.000Z | [
"tf",
"bert",
"transformers"
]
| [
".gitattributes",
"config.json",
"special_tokens_map.json",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| absa | 15 | transformers | ||
absa/classifier-lapt-0.2.1 | 2021-05-19T11:36:22.000Z | [
"tf",
"bert",
"transformers"
]
| [
".gitattributes",
"callbacks.bin",
"config.json",
"experiment.log",
"special_tokens_map.json",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| absa | 12 | transformers | ||
absa/classifier-lapt-0.2 | 2021-05-19T11:36:56.000Z | [
"tf",
"bert",
"transformers"
]
| [
".gitattributes",
"callbacks.bin",
"config.json",
"experiment.log",
"special_tokens_map.json",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| absa | 18 | transformers | ||
absa/classifier-rest-0.1 | 2021-05-19T11:37:20.000Z | [
"tf",
"bert",
"transformers"
]
| [
".gitattributes",
"config.json",
"special_tokens_map.json",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| absa | 12 | transformers |