modelId | lastModified | tags | pipeline_tag | files | publishedBy | downloads_last_month | library | modelCard |
---|---|---|---|---|---|---|---|---|
facebook/wav2vec2-large-xlsr-53-polish | 2021-03-17T07:54:08.000Z | [
"pytorch",
"wav2vec2",
"nl",
"dataset:common_voice",
"transformers",
"speech",
"audio",
"automatic-speech-recognition",
"license:apache-2.0"
]
| automatic-speech-recognition | [
".gitattributes",
"README.md",
"config.json",
"preprocessor_config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.json"
]
| facebook | 7 | transformers | ---
language: pl
datasets:
- common_voice
tags:
- speech
- audio
- automatic-speech-recognition
license: apache-2.0
---
## Evaluation on Common Voice PL Test
```python
import torchaudio
from datasets import load_dataset, load_metric
from transformers import (
Wav2Vec2ForCTC,
Wav2Vec2Processor,
)
import torch
import re
model_name = "facebook/wav2vec2-large-xlsr-53-polish"
device = "cuda"
chars_to_ignore_regex = r'[\,\?\.\!\-\;\:\"]'
model = Wav2Vec2ForCTC.from_pretrained(model_name).to(device)
processor = Wav2Vec2Processor.from_pretrained(model_name)
ds = load_dataset("common_voice", "pl", split="test", data_dir="./cv-corpus-6.1-2020-12-11")
resampler = torchaudio.transforms.Resample(orig_freq=48_000, new_freq=16_000)
def map_to_array(batch):
speech, _ = torchaudio.load(batch["path"])
batch["speech"] = resampler.forward(speech.squeeze(0)).numpy()
batch["sampling_rate"] = resampler.new_freq
batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower().replace("’", "'")
return batch
ds = ds.map(map_to_array)
def map_to_pred(batch):
features = processor(batch["speech"], sampling_rate=batch["sampling_rate"][0], padding=True, return_tensors="pt")
input_values = features.input_values.to(device)
attention_mask = features.attention_mask.to(device)
with torch.no_grad():
logits = model(input_values, attention_mask=attention_mask).logits
pred_ids = torch.argmax(logits, dim=-1)
batch["predicted"] = processor.batch_decode(pred_ids)
batch["target"] = batch["sentence"]
return batch
result = ds.map(map_to_pred, batched=True, batch_size=16, remove_columns=list(ds.features.keys()))
wer = load_metric("wer")
print(wer.compute(predictions=result["predicted"], references=result["target"]))
```
**Result**: 24.6 % |
facebook/wav2vec2-large-xlsr-53-portuguese | 2021-03-11T11:44:02.000Z | [
"pytorch",
"wav2vec2",
"pt",
"dataset:common_voice",
"transformers",
"speech",
"audio",
"automatic-speech-recognition",
"license:apache-2.0"
]
| automatic-speech-recognition | [
".gitattributes",
"README.md",
"config.json",
"preprocessor_config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.json"
]
| facebook | 2,125 | transformers | ---
language: pt
datasets:
- common_voice
tags:
- speech
- audio
- automatic-speech-recognition
license: apache-2.0
---
## Evaluation on Common Voice PT Test
```python
import torchaudio
from datasets import load_dataset, load_metric
from transformers import (
Wav2Vec2ForCTC,
Wav2Vec2Processor,
)
import torch
import re
model_name = "facebook/wav2vec2-large-xlsr-53-portuguese"
device = "cuda"
chars_to_ignore_regex = r'[\,\?\.\!\-\;\:\"]'
model = Wav2Vec2ForCTC.from_pretrained(model_name).to(device)
processor = Wav2Vec2Processor.from_pretrained(model_name)
ds = load_dataset("common_voice", "pt", split="test", data_dir="./cv-corpus-6.1-2020-12-11")
resampler = torchaudio.transforms.Resample(orig_freq=48_000, new_freq=16_000)
def map_to_array(batch):
speech, _ = torchaudio.load(batch["path"])
batch["speech"] = resampler.forward(speech.squeeze(0)).numpy()
batch["sampling_rate"] = resampler.new_freq
batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower().replace("’", "'")
return batch
ds = ds.map(map_to_array)
def map_to_pred(batch):
features = processor(batch["speech"], sampling_rate=batch["sampling_rate"][0], padding=True, return_tensors="pt")
input_values = features.input_values.to(device)
attention_mask = features.attention_mask.to(device)
with torch.no_grad():
logits = model(input_values, attention_mask=attention_mask).logits
pred_ids = torch.argmax(logits, dim=-1)
batch["predicted"] = processor.batch_decode(pred_ids)
batch["target"] = batch["sentence"]
return batch
result = ds.map(map_to_pred, batched=True, batch_size=16, remove_columns=list(ds.features.keys()))
wer = load_metric("wer")
print(wer.compute(predictions=result["predicted"], references=result["target"]))
```
**Result**: 27.1 % |
facebook/wav2vec2-large-xlsr-53-spanish | 2021-03-11T11:35:42.000Z | [
"pytorch",
"wav2vec2",
"es",
"dataset:common_voice",
"transformers",
"speech",
"audio",
"automatic-speech-recognition",
"license:apache-2.0"
]
| automatic-speech-recognition | [
".gitattributes",
"README.md",
"config.json",
"preprocessor_config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.json"
]
| facebook | 8,803 | transformers | ---
language: es
datasets:
- common_voice
tags:
- speech
- audio
- automatic-speech-recognition
license: apache-2.0
---
## Evaluation on Common Voice ES Test
```python
import torchaudio
from datasets import load_dataset, load_metric
from transformers import (
Wav2Vec2ForCTC,
Wav2Vec2Processor,
)
import torch
import re
model_name = "facebook/wav2vec2-large-xlsr-53-spanish"
device = "cuda"
chars_to_ignore_regex = r'[\,\?\.\!\-\;\:\"]'
model = Wav2Vec2ForCTC.from_pretrained(model_name).to(device)
processor = Wav2Vec2Processor.from_pretrained(model_name)
ds = load_dataset("common_voice", "es", split="test", data_dir="./cv-corpus-6.1-2020-12-11")
resampler = torchaudio.transforms.Resample(orig_freq=48_000, new_freq=16_000)
def map_to_array(batch):
speech, _ = torchaudio.load(batch["path"])
batch["speech"] = resampler.forward(speech.squeeze(0)).numpy()
batch["sampling_rate"] = resampler.new_freq
batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower().replace("’", "'")
return batch
ds = ds.map(map_to_array)
def map_to_pred(batch):
features = processor(batch["speech"], sampling_rate=batch["sampling_rate"][0], padding=True, return_tensors="pt")
input_values = features.input_values.to(device)
attention_mask = features.attention_mask.to(device)
with torch.no_grad():
logits = model(input_values, attention_mask=attention_mask).logits
pred_ids = torch.argmax(logits, dim=-1)
batch["predicted"] = processor.batch_decode(pred_ids)
batch["target"] = batch["sentence"]
return batch
result = ds.map(map_to_pred, batched=True, batch_size=16, remove_columns=list(ds.features.keys()))
wer = load_metric("wer")
print(wer.compute(predictions=result["predicted"], references=result["target"]))
```
**Result**: 17.6 % |
facebook/wav2vec2-large-xlsr-53 | 2021-06-09T18:53:03.000Z | [
"pytorch",
"wav2vec2",
"pretraining",
"multilingual",
"dataset:common_voice",
"arxiv:2006.13979",
"transformers",
"speech",
"license:apache-2.0"
]
| [
".gitattributes",
"README.md",
"config.json",
"preprocessor_config.json",
"pytorch_model.bin"
]
| facebook | 14,492 | transformers | ---
language: multilingual
datasets:
- common_voice
tags:
- speech
license: apache-2.0
---
# Wav2Vec2-XLSR-53
[Facebook's XLSR-Wav2Vec2](https://ai.facebook.com/blog/wav2vec-20-learning-the-structure-of-speech-from-raw-audio/)
This is the base model, pretrained on 16kHz sampled speech audio. When using the model, make sure that your speech input is also sampled at 16kHz. Note that this model should be fine-tuned on a downstream task, like Automatic Speech Recognition. Check out [this blog](https://huggingface.co/blog/fine-tune-wav2vec2-english) for more information.
[Paper](https://arxiv.org/abs/2006.13979)
Authors: Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli
**Abstract**
This paper presents XLSR which learns cross-lingual speech representations by pretraining a single model from the raw waveform of speech in multiple languages. We build on wav2vec 2.0 which is trained by solving a contrastive task over masked latent speech representations and jointly learns a quantization of the latents shared across languages. The resulting model is fine-tuned on labeled data and experiments show that cross-lingual pretraining significantly outperforms monolingual pretraining. On the CommonVoice benchmark, XLSR shows a relative phoneme error rate reduction of 72% compared to the best known results. On BABEL, our approach improves word error rate by 16% relative compared to a comparable system. Our approach enables a single multilingual speech recognition model which is competitive to strong individual models. Analysis shows that the latent discrete speech representations are shared across languages with increased sharing for related languages. We hope to catalyze research in low-resource speech understanding by releasing XLSR-53, a large model pretrained in 53 languages.
The original model can be found under https://github.com/pytorch/fairseq/tree/master/examples/wav2vec#wav2vec-20.
# Usage
See [this notebook](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/Fine_Tune_XLSR_Wav2Vec2_on_Turkish_ASR_with_%F0%9F%A4%97_Transformers.ipynb) for more information on how to fine-tune the model.
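As a quick check that the checkpoint loads and encodes audio, it can also be used directly as a feature encoder. The following is a minimal sketch (not part of the original card; the silent dummy waveform stands in for real 16kHz audio):
```python
import numpy as np
import torch
from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2Model

feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-large-xlsr-53")
model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-large-xlsr-53")

# one second of silence as a stand-in for real audio sampled at 16kHz
waveform = np.zeros(16_000, dtype=np.float32)
inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors="pt")

with torch.no_grad():
    hidden_states = model(inputs.input_values).last_hidden_state
print(hidden_states.shape)  # (1, num_frames, 1024) for this architecture
```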

|
|
facebook/wav2vec2-large | 2021-06-09T18:47:33.000Z | [
"pytorch",
"wav2vec2",
"pretraining",
"en",
"dataset:librispeech_asr",
"arxiv:2006.11477",
"transformers",
"speech",
"license:apache-2.0"
]
| [
".gitattributes",
"README.md",
"config.json",
"pytorch_model.bin"
]
| facebook | 671 | transformers | ---
language: en
datasets:
- librispeech_asr
tags:
- speech
license: apache-2.0
---
# Wav2Vec2-Large
[Facebook's Wav2Vec2](https://ai.facebook.com/blog/wav2vec-20-learning-the-structure-of-speech-from-raw-audio/)
This is the base model, pretrained on 16kHz sampled speech audio. When using the model, make sure that your speech input is also sampled at 16kHz. Note that this model should be fine-tuned on a downstream task, like Automatic Speech Recognition. Check out [this blog](https://huggingface.co/blog/fine-tune-wav2vec2-english) for more information.
[Paper](https://arxiv.org/abs/2006.11477)
Authors: Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli
**Abstract**
We show for the first time that learning powerful representations from speech audio alone followed by fine-tuning on transcribed speech can outperform the best semi-supervised methods while being conceptually simpler. wav2vec 2.0 masks the speech input in the latent space and solves a contrastive task defined over a quantization of the latent representations which are jointly learned. Experiments using all labeled data of Librispeech achieve 1.8/3.3 WER on the clean/other test sets. When lowering the amount of labeled data to one hour, wav2vec 2.0 outperforms the previous state of the art on the 100 hour subset while using 100 times less labeled data. Using just ten minutes of labeled data and pre-training on 53k hours of unlabeled data still achieves 4.8/8.2 WER. This demonstrates the feasibility of speech recognition with limited amounts of labeled data.
The original model can be found under https://github.com/pytorch/fairseq/tree/master/examples/wav2vec#wav2vec-20.
# Usage
See [this notebook](https://colab.research.google.com/drive/1FjTsqbYKphl9kL-eILgUc-bl4zVThL8F?usp=sharing) for more information on how to fine-tune the model.
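Since this checkpoint ships without a preprocessor config or tokenizer, a typical first step is to attach a fresh CTC head and fine-tune with your own character vocabulary. A minimal sketch (the vocabulary size of 32 is an assumption; use the size of your own CTC tokenizer):
```python
from transformers import Wav2Vec2ForCTC

model = Wav2Vec2ForCTC.from_pretrained(
    "facebook/wav2vec2-large",
    vocab_size=32,               # must match your own CTC tokenizer
    ctc_loss_reduction="mean",
)
# the convolutional feature extractor is usually kept frozen during fine-tuning
model.freeze_feature_extractor()
```
|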
|
facebook/wmt19-de-en | 2020-12-11T21:39:51.000Z | [
"pytorch",
"fsmt",
"seq2seq",
"de",
"en",
"dataset:wmt19",
"arxiv:1907.06616",
"transformers",
"translation",
"wmt19",
"facebook",
"license:apache-2.0",
"text2text-generation"
]
| translation | [
".gitattributes",
"README.md",
"config.json",
"merges.txt",
"pytorch_model.bin",
"tokenizer_config.json",
"vocab-src.json",
"vocab-tgt.json"
]
| facebook | 4,223 | transformers | ---
language:
- de
- en
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
thumbnail: https://huggingface.co/front/thumbnails/facebook.png
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for de-en.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation.
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-de-en"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "Maschinelles Lernen ist großartig, oder?"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # Machine learning is great, isn't it?
```
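To translate several sentences at once, the tokenizer can pad a whole batch. A minimal sketch (not part of the original card):
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer

mname = "facebook/wmt19-de-en"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

sentences = [
    "Maschinelles Lernen ist großartig, oder?",
    "Das Wetter ist heute schön.",
]
# pad to the longest sentence in the batch
batch = tokenizer(sentences, return_tensors="pt", padding=True)
outputs = model.generate(**batch, num_beams=5)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
```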
#### Limitations and bias
- The original model (and this ported one) does not seem to handle inputs with repeated sub-phrases well; [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
de-en | [42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750) | 41.35
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best-performing checkpoint was ported (`model4.pt`)
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR=de-en
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
Note: fairseq reports using a beam of 50, so you should get a slightly higher score if you re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{...,
year={2020},
title={Facebook FAIR's WMT19 News Translation Task Submission},
author={Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey},
booktitle={Proc. of WMT},
}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
|
facebook/wmt19-en-de | 2020-12-11T21:39:55.000Z | [
"pytorch",
"fsmt",
"seq2seq",
"en",
"de",
"dataset:wmt19",
"arxiv:1907.06616",
"transformers",
"translation",
"wmt19",
"facebook",
"license:apache-2.0",
"text2text-generation"
]
| translation | [
".gitattributes",
"README.md",
"config.json",
"merges.txt",
"pytorch_model.bin",
"tokenizer_config.json",
"vocab-src.json",
"vocab-tgt.json"
]
| facebook | 4,230 | transformers | ---
language:
- en
- de
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
thumbnail: https://huggingface.co/front/thumbnails/facebook.png
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for en-de.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation.
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-en-de"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "Machine learning is great, isn't it?"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # Maschinelles Lernen ist großartig, oder?
```
#### Limitations and bias
- The original model (and this ported one) does not seem to handle inputs with repeated sub-phrases well; [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
en-de | [43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862) | 42.83
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best-performing checkpoint was ported (`model4.pt`)
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR=en-de
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
Note: fairseq reports using a beam of 50, so you should get a slightly higher score if you re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{...,
year={2020},
title={Facebook FAIR's WMT19 News Translation Task Submission},
author={Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey},
booktitle={Proc. of WMT},
}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
|
facebook/wmt19-en-ru | 2020-12-11T21:39:58.000Z | [
"pytorch",
"fsmt",
"seq2seq",
"en",
"ru",
"dataset:wmt19",
"arxiv:1907.06616",
"transformers",
"translation",
"wmt19",
"facebook",
"license:apache-2.0",
"text2text-generation"
]
| translation | [
".gitattributes",
"README.md",
"config.json",
"merges.txt",
"pytorch_model.bin",
"tokenizer_config.json",
"vocab-src.json",
"vocab-tgt.json"
]
| facebook | 8,755 | transformers | ---
language:
- en
- ru
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
thumbnail: https://huggingface.co/front/thumbnails/facebook.png
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for en-ru.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation.
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-en-ru"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "Machine learning is great, isn't it?"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # Машинное обучение - это здорово, не так ли?
```
#### Limitations and bias
- The original model (and this ported one) does not seem to handle inputs with repeated sub-phrases well; [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
en-ru | [36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724) | 33.47
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best-performing checkpoint was ported (`model4.pt`)
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR=en-ru
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
Note: fairseq reports using a beam of 50, so you should get a slightly higher score if you re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{...,
year={2020},
title={Facebook FAIR's WMT19 News Translation Task Submission},
author={Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey},
booktitle={Proc. of WMT},
}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
|
facebook/wmt19-ru-en | 2020-12-11T21:40:01.000Z | [
"pytorch",
"fsmt",
"seq2seq",
"ru",
"en",
"dataset:wmt19",
"arxiv:1907.06616",
"transformers",
"translation",
"wmt19",
"facebook",
"license:apache-2.0",
"text2text-generation"
]
| translation | [
".gitattributes",
"README.md",
"config.json",
"merges.txt",
"pytorch_model.bin",
"tokenizer_config.json",
"vocab-src.json",
"vocab-tgt.json"
]
| facebook | 783 | transformers | ---
language:
- ru
- en
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
thumbnail: https://huggingface.co/front/thumbnails/facebook.png
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for ru-en.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation.
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-ru-en"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "Машинное обучение - это здорово, не так ли?"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # Machine learning is great, isn't it?
```
#### Limitations and bias
- The original model (and this ported one) does not seem to handle inputs with repeated sub-phrases well; [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
ru-en | [41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937) | 39.20
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best-performing checkpoint was ported (`model4.pt`)
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR=ru-en
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
Note: fairseq reports using a beam of 50, so you should get a slightly higher score if you re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{...,
year={2020},
title={Facebook FAIR's WMT19 News Translation Task Submission},
author={Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey},
booktitle={Proc. of WMT},
}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
|
faizank2/bhand | 2021-03-04T21:56:30.000Z | []
| [
".gitattributes"
]
| faizank2 | 0 | |||
faizank2/legal-ner | 2021-03-04T20:53:57.000Z | []
| [
".gitattributes"
]
| faizank2 | 0 | |||
faizraza/temp1 | 2021-06-12T11:10:53.000Z | []
| [
".gitattributes"
]
| faizraza | 0 | |||
faroit/open-unmix | 2021-02-20T21:19:23.000Z | []
| [
".gitattributes"
]
| faroit | 0 | |||
faten/test_model | 2021-03-30T12:36:44.000Z | []
| [
".gitattributes"
]
| faten | 0 | |||
fatvvs/model_name | 2021-06-07T12:13:18.000Z | []
| [
".gitattributes"
]
| fatvvs | 0 | |||
faust/broken_t5_squad2 | 2020-07-04T08:42:08.000Z | [
"pytorch",
"t5",
"seq2seq",
"transformers",
"text2text-generation"
]
| text2text-generation | [
".gitattributes",
"added_tokens.json",
"config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"spiece.model",
"tokenizer_config.json"
]
| faust | 13 | transformers | |
fbu63711/dfgdfgdf | 2020-11-18T07:34:06.000Z | []
| [
".gitattributes"
]
| fbu63711 | 0 | |||
feconroses/autonlp-SaaS_Topic_Detection-99369 | 2021-04-11T15:07:14.000Z | [
"pytorch",
"distilbert",
"text-classification",
"en",
"transformers",
"autonlp"
]
| text-classification | [
".gitattributes",
"README.md",
"config.json",
"pytorch_model.bin",
"sample_input.pkl",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.txt"
]
| feconroses | 80 | transformers | ---
tags: autonlp
language: en
widget:
- text: "I love AutoNLP 🤗"
---
# Model Trained Using AutoNLP
- Problem type: Multi-class Classification
- Model ID: 99369
## Validation Metrics
- Loss: 2.408306121826172
- Accuracy: 0.2708333333333333
- Macro F1: 0.1101851851851852
- Micro F1: 0.2708333333333333
- Weighted F1: 0.22777777777777777
- Macro Precision: 0.10891812865497075
- Micro Precision: 0.2708333333333333
- Weighted Precision: 0.212719298245614
- Macro Recall: 0.12373737373737376
- Micro Recall: 0.2708333333333333
- Weighted Recall: 0.2708333333333333
## Usage
You can use cURL to access this model:
```bash
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoNLP"}' https://api-inference.huggingface.co/models/feconroses/autonlp-SaaS_Topic_Detection-99369
```
Or Python API:
```python
from transformers import AutoModelForSequenceClassification, AutoTokenizer
model = AutoModelForSequenceClassification.from_pretrained("feconroses/autonlp-SaaS_Topic_Detection-99369", use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained("feconroses/autonlp-SaaS_Topic_Detection-99369", use_auth_token=True)
inputs = tokenizer("I love AutoNLP", return_tensors="pt")
outputs = model(**inputs)
``` |
felflare/bert-restore-punctuation | 2021-05-24T03:04:47.000Z | [
"pytorch",
"bert",
"token-classification",
"en",
"dataset:yelp_polarity",
"transformers",
"punctuation",
"license:mit"
]
| token-classification | [
".gitattributes",
"README.md",
"config.json",
"model_args.json",
"optimizer.pt",
"pytorch_model.bin",
"scheduler.pt",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.txt"
]
| felflare | 447 | transformers | ---
language:
- en
tags:
- punctuation
license: mit
datasets:
- yelp_polarity
metrics:
- f1
---
# ✨ bert-restore-punctuation
This is a bert-base-uncased model fine-tuned for punctuation restoration on [Yelp Reviews](https://www.tensorflow.org/datasets/catalog/yelp_polarity_reviews).
The model predicts the punctuation and upper-casing of plain, lower-cased text. Example use cases include ASR output or other settings where text has lost its punctuation.
This model is intended for direct use as a punctuation restoration model for general English. Alternatively, you can use it for further fine-tuning on domain-specific texts for punctuation restoration tasks.
The model restores the following punctuation marks -- **[! ? . , - : ; ' ]**
It also restores the upper-casing of words.
-----------------------------------------------
## 🚋 Usage
**Below is a quick way to get up and running with the model.**
1. First, install the package.
```bash
pip install rpunct
```
2. Sample python code.
```python
from rpunct import RestorePuncts
# The default language is 'english'
rpunct = RestorePuncts()
rpunct.punctuate("""in 2018 cornell researchers built a high-powered detector that in combination with an algorithm-driven process called ptychography set a world record
by tripling the resolution of a state-of-the-art electron microscope as successful as it was that approach had a weakness it only worked with ultrathin samples that were
a few atoms thick anything thicker would cause the electrons to scatter in ways that could not be disentangled now a team again led by david muller the samuel b eckert
professor of engineering has bested its own record by a factor of two with an electron microscope pixel array detector empad that incorporates even more sophisticated
3d reconstruction algorithms the resolution is so fine-tuned the only blurring that remains is the thermal jiggling of the atoms themselves""")
# Outputs the following:
# In 2018, Cornell researchers built a high-powered detector that, in combination with an algorithm-driven process called Ptychography, set a world record by tripling the
# resolution of a state-of-the-art electron microscope. As successful as it was, that approach had a weakness. It only worked with ultrathin samples that were a few atoms
# thick. Anything thicker would cause the electrons to scatter in ways that could not be disentangled. Now, a team again led by David Muller, the Samuel B.
# Eckert Professor of Engineering, has bested its own record by a factor of two with an Electron microscope pixel array detector empad that incorporates even more
# sophisticated 3d reconstruction algorithms. The resolution is so fine-tuned the only blurring that remains is the thermal jiggling of the atoms themselves.
```
**This model works on arbitrarily large English text and uses a GPU if available.**
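If you would rather not depend on `rpunct`, the underlying checkpoint can also be queried directly as a token-classification model. A minimal sketch (not part of the original card; each token receives a raw label encoding the punctuation/casing decisions summarized in the Accuracy section below):
```python
from transformers import pipeline

restorer = pipeline("token-classification", model="felflare/bert-restore-punctuation")
# print the raw per-token labels the model predicts
for pred in restorer("my name is clara and i live in berkeley california"):
    print(pred["word"], pred["entity"], round(pred["score"], 3))
```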
-----------------------------------------------
## 📡 Training data
Here is the number of product reviews we used for fine-tuning the model:
| Language | Number of text samples|
| -------- | ----------------- |
| English | 560,000 |
We found the best convergence around _**3 epochs**_, which is what is presented here and available for download.
-----------------------------------------------
## 🎯 Accuracy
The fine-tuned model obtained the following accuracy on 45,990 held-out text samples:
| Accuracy | Overall F1 | Eval Support |
| -------- | ---------------------- | ------------------- |
| 91% | 90% | 45,990 |
Below is a breakdown of the performance of the model by each label:
| label | precision | recall | f1-score | support|
| --------- | -------------|-------- | ----------|--------|
| **!** | 0.45 | 0.17 | 0.24 | 424
| **!+Upper** | 0.43 | 0.34 | 0.38 | 98
| **'** | 0.60 | 0.27 | 0.37 | 11
| **,** | 0.59 | 0.51 | 0.55 | 1522
| **,+Upper** | 0.52 | 0.50 | 0.51 | 239
| **-** | 0.00 | 0.00 | 0.00 | 18
| **.** | 0.69 | 0.84 | 0.75 | 2488
| **.+Upper** | 0.65 | 0.52 | 0.57 | 274
| **:** | 0.52 | 0.31 | 0.39 | 39
| **:+Upper** | 0.36 | 0.62 | 0.45 | 16
| **;** | 0.00 | 0.00 | 0.00 | 17
| **?** | 0.54 | 0.48 | 0.51 | 46
| **?+Upper** | 0.40 | 0.50 | 0.44 | 4
| **none** | 0.96 | 0.96 | 0.96 | 35352
| **Upper** | 0.84 | 0.82 | 0.83 | 5442
-----------------------------------------------
## ☕ Contact
Contact [Daulet Nurmanbetov]([email protected]) for questions, feedback and/or requests for similar models.
----------------------------------------------- |
felixhusen/poem | 2021-05-21T16:01:12.000Z | [
"pytorch",
"jax",
"gpt2",
"lm-head",
"causal-lm",
"transformers",
"text-generation"
]
| text-generation | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.json"
]
| felixhusen | 134 | transformers | |
felixhusen/scientific | 2021-05-21T16:02:25.000Z | [
"pytorch",
"jax",
"gpt2",
"lm-head",
"causal-lm",
"transformers",
"text-generation"
]
| text-generation | [
".gitattributes",
"config.json",
"flax_model.msgpack",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.json"
]
| felixhusen | 41 | transformers | |
feng/BERT-wwm | 2021-04-13T07:37:25.000Z | []
| [
".gitattributes"
]
| feng | 0 | |||
fenrhjen/camembert_aux_amandes | 2020-12-20T18:22:33.000Z | [
"pytorch",
"camembert",
"masked-lm",
"transformers",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"README.md",
"config.json",
"eval_results.txt",
"merges.txt",
"pytorch_model.bin",
"sentencepiece.bpe.model",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.json"
]
| fenrhjen | 34 | transformers | |
fernandopessagno/dsadsa | 2021-05-27T09:44:29.000Z | []
| [
".gitattributes"
]
| fernandopessagno | 0 | |||
fernandopessagno/test | 2021-01-27T09:32:24.000Z | []
| [
".gitattributes"
]
| fernandopessagno | 0 | |||
festival/no | 2021-04-15T21:33:35.000Z | []
| [
".gitattributes",
"README.md"
]
| festival | 0 | |||
ffarzad/xlnet_sentiment | 2021-02-23T18:37:31.000Z | []
| [
".gitattributes",
"xlnet_model.bin",
"xlnet_model__.bin"
]
| ffarzad | 0 | |||
ffrmns/t5-small_XSum-finetuned | 2021-04-20T00:59:58.000Z | [
"pytorch",
"t5",
"seq2seq",
"transformers",
"text2text-generation"
]
| text2text-generation | [
".gitattributes",
"config.json",
"pytorch_model.bin",
"spiece.model",
"tokenizer_config.json"
]
| ffrmns | 6 | transformers | |
fgjmfyhj/dtjndgyjkdrytkfr | 2021-04-03T20:02:25.000Z | []
| [
".gitattributes",
"README.md",
"sfhndsjdtjdrj"
]
| fgjmfyhj | 0 | |||
fgjmfyhj/eruerujr6-fvgjfykj | 2021-03-15T23:56:53.000Z | []
| [
".gitattributes",
"README.mdqwdqef"
]
| fgjmfyhj | 0 | |||
fgjmfyhj/ggfjfcgjfdsdg | 2021-03-13T22:08:33.000Z | []
| [
".gitattributes",
"README.md"
]
| fgjmfyhj | 0 | |||
fgjmfyhj/seryseruyhed6tujr | 2021-04-08T20:39:02.000Z | []
| [
".gitattributes",
"srdgysrfthetjh"
]
| fgjmfyhj | 0 | |||
fgjmfyhj/tjhdjddjftj | 2021-04-19T20:01:52.000Z | []
| [
".gitattributes",
"asegash"
]
| fgjmfyhj | 0 | |||
fgjmfyhj/tjryjkrykrk | 2021-03-30T20:12:05.000Z | []
| [
".gitattributes"
]
| fgjmfyhj | 0 | |||
fgjmfyhj/watch-army-of-the-dead-full-movie-online-free-netflix | 2021-05-25T21:45:25.000Z | []
| [
".gitattributes",
"README.md"
]
| fgjmfyhj | 0 | |||
fgjmfyhj/zdgbsfhbsxfhs | 2021-03-26T20:09:10.000Z | []
| [
".gitattributes",
"zdbsfnsxfnsxnsxfdndf"
]
| fgjmfyhj | 0 | |||
fgjmfyhj/zdsgbsfhbsfnsx | 2021-03-28T00:17:38.000Z | []
| [
".gitattributes",
"zdsgbvsazdrhbsfhs"
]
| fgjmfyhj | 0 | |||
fhswf/bert_de_ner | 2021-05-19T16:49:54.000Z | [
"pytorch",
"tf",
"jax",
"bert",
"token-classification",
"de",
"dataset:germeval_14",
"transformers",
"license:cc-by-sa-4.0",
"German",
"NER"
]
| token-classification | [
".gitattributes",
"README.md",
"config.json",
"flax_model.msgpack",
"pytorch_model.bin",
"special_tokens_map.json",
"tf_model.h5",
"tokenizer_config.json",
"vocab.txt"
]
| fhswf | 1,696 | transformers | ---
language: de
license: cc-by-sa-4.0
datasets:
- germeval_14
tags:
- German
- de
- NER
---
# BERT-DE-NER
## What is it?
This is a German BERT model fine-tuned for named entity recognition.
## Base model & training
This model is based on [bert-base-german-dbmdz-cased](https://huggingface.co/bert-base-german-dbmdz-cased) and has been fine-tuned
for NER on the training data from [GermEval2014](https://sites.google.com/site/germeval2014ner).
## Model results
The results on the test data from GermEval2014 are (entities only):
| Precision | Recall | F1-Score |
|----------:|-------:|---------:|
| 0.817 | 0.842 | 0.829 |
## How to use
```python
>>> from transformers import pipeline
>>> classifier = pipeline('ner', model="fhswf/bert_de_ner")
>>> classifier('Von der Organisation „medico international“ hieß es, die EU entziehe sich seit vielen Jahren der Verantwortung für die Menschen an ihren Außengrenzen.')
[{'word': 'med', 'score': 0.9996621608734131, 'entity': 'B-ORG', 'index': 6},
{'word': '##ico', 'score': 0.9995362162590027, 'entity': 'I-ORG', 'index': 7},
{'word': 'international',
'score': 0.9996932744979858,
'entity': 'I-ORG',
'index': 8},
{'word': 'eu', 'score': 0.9997008442878723, 'entity': 'B-ORG', 'index': 14}]
```
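The raw output above returns WordPiece fragments ("med", "##ico", ...). To merge fragments into whole entities, the pipeline can group them. A brief sketch (not part of the original card; `grouped_entities` is the flag in transformers versions of this era):
```python
>>> classifier = pipeline('ner', model="fhswf/bert_de_ner", grouped_entities=True)
>>> classifier('Von der Organisation „medico international“ hieß es, die EU entziehe sich der Verantwortung.')
# fragments come back merged, each result carrying an 'entity_group' key such as ORG
```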
|
fhzh123/k_nc_bert | 2021-04-13T14:40:32.000Z | []
| [
".gitattributes"
]
| fhzh123 | 0 | |||
figge/intpolitics | 2021-03-23T14:40:43.000Z | []
| [
".gitattributes"
]
| figge | 0 | |||
finiteautomata/bert-contextualized-hate-category-es | 2021-05-19T16:50:33.000Z | [
"pytorch",
"bert",
"transformers"
]
| [
".gitattributes",
"config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.txt"
]
| finiteautomata | 13 | transformers | ||
finiteautomata/bert-contextualized-hate-speech-es | 2021-05-19T16:51:14.000Z | [
"pytorch",
"jax",
"bert",
"text-classification",
"transformers"
]
| text-classification | [
".gitattributes",
"added_tokens.json",
"config.json",
"flax_model.msgpack",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.txt"
]
| finiteautomata | 34 | transformers | |
finiteautomata/bert-non-contextualized-hate-category-es | 2021-05-19T16:51:52.000Z | [
"pytorch",
"bert",
"transformers"
]
| [
".gitattributes",
"config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.txt"
]
| finiteautomata | 14 | transformers | ||
finiteautomata/bert-non-contextualized-hate-speech-es | 2021-05-19T16:52:40.000Z | [
"pytorch",
"jax",
"bert",
"text-classification",
"transformers"
]
| text-classification | [
".gitattributes",
"added_tokens.json",
"config.json",
"flax_model.msgpack",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.txt"
]
| finiteautomata | 19 | transformers | |
finiteautomata/bert-title-body-hate-speech-es | 2021-04-21T02:02:28.000Z | []
| [
".gitattributes"
]
| finiteautomata | 0 | |||
finiteautomata/bertweet-base-emotion-analysis | 2021-05-27T02:47:52.000Z | [
"pytorch",
"roberta",
"text-classification",
"transformers"
]
| text-classification | [
".gitattributes",
"added_tokens.json",
"bpe.codes",
"config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.txt"
]
| finiteautomata | 248 | transformers | |
finiteautomata/bertweet-base-sentiment-analysis | 2021-05-26T11:55:15.000Z | [
"pytorch",
"roberta",
"text-classification",
"transformers"
]
| text-classification | [
".gitattributes",
"README.md",
"added_tokens.json",
"bpe.codes",
"config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.txt"
]
| finiteautomata | 407 | transformers | # Sentiment Analysis in English
## bertweet-sentiment-analysis
Repository: [https://github.com/finiteautomata/pysentimiento/](https://github.com/finiteautomata/pysentimiento/)
Model trained with the SemEval 2017 corpus (around 40k tweets). The base model is [BerTweet](https://github.com/VinAIResearch/BERTweet), a RoBERTa model trained on English tweets.
Uses `POS`, `NEG`, `NEU` labels.
**Coming soon**: a brief paper describing the model and training.
Enjoy! 🤗
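A minimal usage sketch with the standard `transformers` pipeline (not part of the original card):
```python
from transformers import pipeline

classifier = pipeline("sentiment-analysis", model="finiteautomata/bertweet-base-sentiment-analysis")
print(classifier("I love this library!"))  # e.g. [{'label': 'POS', 'score': ...}]
```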
|
finiteautomata/beto-emotion-analysis | 2021-05-27T02:48:42.000Z | [
"pytorch",
"bert",
"text-classification",
"transformers"
]
| text-classification | [
".gitattributes",
"README.md",
"added_tokens.json",
"config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer.json",
"tokenizer_config.json",
"vocab.txt"
]
| finiteautomata | 623 | transformers | # Emotion Analysis in Spanish
## beto-emotion-analysis
Repository: [https://github.com/finiteautomata/pysentimiento/](https://github.com/finiteautomata/pysentimiento/)
Model trained with the TASS 2020 Task 2 corpus for emotion detection in Spanish. The base model is [BETO](https://github.com/dccuchile/beto), a BERT model trained in Spanish.
**Coming soon**: a brief paper describing the model and training.
Enjoy! 🤗
|
finiteautomata/beto-sentiment-analysis | 2021-05-24T18:31:19.000Z | [
"pytorch",
"jax",
"bert",
"text-classification",
"transformers"
]
| text-classification | [
".gitattributes",
"README.md",
"added_tokens.json",
"config.json",
"flax_model.msgpack",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer.json",
"tokenizer_config.json",
"vocab.txt"
]
| finiteautomata | 647,994 | transformers | # Sentiment Analysis in Spanish
## beto-sentiment-analysis
Repository: [https://github.com/finiteautomata/pysentimiento/](https://github.com/finiteautomata/pysentimiento/)
Model trained with the TASS 2020 corpus (around 5k tweets) covering several dialects of Spanish. The base model is [BETO](https://github.com/dccuchile/beto), a BERT model trained in Spanish.
Uses `POS`, `NEG`, `NEU` labels.
**Coming soon**: a brief paper describing the model and training.
Enjoy! 🤗
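A minimal usage sketch via `AutoModelForSequenceClassification` (not part of the original card; the label names follow the `POS`/`NEG`/`NEU` scheme above):
```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_name = "finiteautomata/beto-sentiment-analysis"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

inputs = tokenizer("¡Qué gran día!", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(dim=-1).item()])  # POS, NEG or NEU
```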
|
firstmediabuyer/sber | 2020-11-15T12:35:34.000Z | []
| [
".gitattributes"
]
| firstmediabuyer | 0 | |||
flair/chunk-english-fast | 2021-03-02T21:59:23.000Z | [
"pytorch",
"en",
"dataset:conll2000",
"flair",
"token-classification",
"sequence-tagger-model"
]
| token-classification | [
".gitattributes",
"README.md",
"loss.tsv",
"pytorch_model.bin"
]
| flair | 0 | flair | ---
tags:
- flair
- token-classification
- sequence-tagger-model
language: en
datasets:
- conll2000
widget:
- text: "The happy man has been eating at the diner"
---
## English Chunking in Flair (fast model)
This is the fast phrase chunking model for English that ships with [Flair](https://github.com/flairNLP/flair/).
F1-Score: **96.22** (CoNLL-2000)
Predicts 10 tags:
| **tag** | **meaning** |
|---------------------------------|-----------|
| ADJP | adjectival |
| ADVP | adverbial |
| CONJP | conjunction |
| INTJ | interjection |
| LST | list marker |
| NP | noun phrase |
| PP | prepositional |
| PRT | particle |
| SBAR | subordinate clause |
| VP | verb phrase |
Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF.
---
### Demo: How to use in Flair
Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`)
```python
from flair.data import Sentence
from flair.models import SequenceTagger
# load tagger
tagger = SequenceTagger.load("flair/chunk-english-fast")
# make example sentence
sentence = Sentence("The happy man has been eating at the diner")
# predict chunk tags
tagger.predict(sentence)
# print sentence
print(sentence)
# print predicted chunk spans
print('The following chunk tags are found:')
# iterate over entities and print
for entity in sentence.get_spans('np'):
print(entity)
```
This yields the following output:
```
Span [1,2,3]: "The happy man" [− Labels: NP (0.9958)]
Span [4,5,6]: "has been eating" [− Labels: VP (0.8759)]
Span [7]: "at" [− Labels: PP (1.0)]
Span [8,9]: "the diner" [− Labels: NP (0.9991)]
```
So, the spans "*The happy man*" and "*the diner*" are labeled as **noun phrases** (NP) and "*has been eating*" is labeled as a **verb phrase** (VP) in the sentence "*The happy man has been eating at the diner*".
---
### Training: Script to train this model
The following Flair script was used to train this model:
```python
from flair.data import Corpus
from flair.datasets import CONLL_2000
from flair.embeddings import StackedEmbeddings, FlairEmbeddings
# 1. get the corpus
corpus: Corpus = CONLL_2000()
# 2. what tag do we want to predict?
tag_type = 'np'
# 3. make the tag dictionary from the corpus
tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)
# 4. initialize each embedding we use
embedding_types = [
# contextual string embeddings, forward
FlairEmbeddings('news-forward-fast'),
# contextual string embeddings, backward
FlairEmbeddings('news-backward-fast'),
]
# embedding stack consists of Flair embeddings
embeddings = StackedEmbeddings(embeddings=embedding_types)
# 5. initialize sequence tagger
from flair.models import SequenceTagger
tagger = SequenceTagger(hidden_size=256,
embeddings=embeddings,
tag_dictionary=tag_dictionary,
tag_type=tag_type)
# 6. initialize trainer
from flair.trainers import ModelTrainer
trainer = ModelTrainer(tagger, corpus)
# 7. run training
trainer.train('resources/taggers/chunk-english-fast',
train_with_dev=True,
max_epochs=150)
```
---
### Cite
Please cite the following paper when using this model.
```
@inproceedings{akbik2018coling,
title={Contextual String Embeddings for Sequence Labeling},
author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland},
booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics},
pages = {1638--1649},
year = {2018}
}
```
---
### Issues?
The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
|
flair/chunk-english | 2021-03-02T22:00:37.000Z | [
"pytorch",
"en",
"dataset:conll2000",
"flair",
"token-classification",
"sequence-tagger-model"
]
| token-classification | [
".gitattributes",
"README.md",
"loss.tsv",
"pytorch_model.bin",
"training.log"
]
| flair | 0 | flair | ---
tags:
- flair
- token-classification
- sequence-tagger-model
language: en
datasets:
- conll2000
widget:
- text: "The happy man has been eating at the diner"
---
## English Chunking in Flair (default model)
This is the standard phrase chunking model for English that ships with [Flair](https://github.com/flairNLP/flair/).
F1-Score: **96.48** (CoNLL-2000)
Predicts 10 tags:
| **tag** | **meaning** |
|---------------------------------|-----------|
| ADJP | adjectival |
| ADVP | adverbial |
| CONJP | conjunction |
| INTJ | interjection |
| LST | list marker |
| NP | noun phrase |
| PP | prepositional |
| PRT | particle |
| SBAR | subordinate clause |
| VP | verb phrase |
Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF.
---
### Demo: How to use in Flair
Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`)
```python
from flair.data import Sentence
from flair.models import SequenceTagger
# load tagger
tagger = SequenceTagger.load("flair/chunk-english")
# make example sentence
sentence = Sentence("The happy man has been eating at the diner")
# predict chunk tags
tagger.predict(sentence)
# print sentence
print(sentence)
# print predicted chunk spans
print('The following chunk tags are found:')
# iterate over entities and print
for entity in sentence.get_spans('np'):
print(entity)
```
This yields the following output:
```
Span [1,2,3]: "The happy man" [− Labels: NP (0.9958)]
Span [4,5,6]: "has been eating" [− Labels: VP (0.8759)]
Span [7]: "at" [− Labels: PP (1.0)]
Span [8,9]: "the diner" [− Labels: NP (0.9991)]
```
So, the spans "*The happy man*" and "*the diner*" are labeled as **noun phrases** (NP) and "*has been eating*" is labeled as a **verb phrase** (VP) in the sentence "*The happy man has been eating at the diner*".
---
### Training: Script to train this model
The following Flair script was used to train this model:
```python
from flair.data import Corpus
from flair.datasets import CONLL_2000
from flair.embeddings import StackedEmbeddings, FlairEmbeddings
# 1. get the corpus
corpus: Corpus = CONLL_2000()
# 2. what tag do we want to predict?
tag_type = 'np'
# 3. make the tag dictionary from the corpus
tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)
# 4. initialize each embedding we use
embedding_types = [
# contextual string embeddings, forward
FlairEmbeddings('news-forward'),
# contextual string embeddings, backward
FlairEmbeddings('news-backward'),
]
# embedding stack consists of Flair embeddings
embeddings = StackedEmbeddings(embeddings=embedding_types)
# 5. initialize sequence tagger
from flair.models import SequenceTagger
tagger = SequenceTagger(hidden_size=256,
embeddings=embeddings,
tag_dictionary=tag_dictionary,
tag_type=tag_type)
# 6. initialize trainer
from flair.trainers import ModelTrainer
trainer = ModelTrainer(tagger, corpus)
# 7. run training
trainer.train('resources/taggers/chunk-english',
train_with_dev=True,
max_epochs=150)
```
---
### Cite
Please cite the following paper when using this model.
```
@inproceedings{akbik2018coling,
title={Contextual String Embeddings for Sequence Labeling},
author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland},
booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics},
pages = {1638--1649},
year = {2018}
}
```
---
### Issues?
The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
|
flair/frame-english-fast | 2021-03-02T22:01:45.000Z | [
"pytorch",
"en",
"dataset:ontonotes",
"flair",
"token-classification",
"sequence-tagger-model"
]
| token-classification | [
".gitattributes",
"README.md",
"loss.tsv",
"pytorch_model.bin",
"training.log"
]
| flair | 0 | flair | ---
tags:
- flair
- token-classification
- sequence-tagger-model
language: en
datasets:
- ontonotes
widget:
- text: "George returned to Berlin to return his hat."
---
## English Verb Disambiguation in Flair (fast model)
This is the fast verb disambiguation model for English that ships with [Flair](https://github.com/flairNLP/flair/).
F1-Score: **88.27** (Ontonotes) - predicts [Proposition Bank verb frames](http://verbs.colorado.edu/propbank/framesets-english-aliases/).
Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF.
---
### Demo: How to use in Flair
Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`)
```python
from flair.data import Sentence
from flair.models import SequenceTagger
# load tagger
tagger = SequenceTagger.load("flair/frame-english-fast")
# make example sentence
sentence = Sentence("George returned to Berlin to return his hat.")
# predict frame tags
tagger.predict(sentence)
# print sentence
print(sentence)
# print predicted frame spans
print('The following frame tags are found:')
# iterate over entities and print
for entity in sentence.get_spans('frame'):
print(entity)
```
This yields the following output:
```
Span [2]: "returned" [− Labels: return.01 (0.9867)]
Span [6]: "return" [− Labels: return.02 (0.4741)]
```
So, the word "*returned*" is labeled as **return.01** (as in *go back somewhere*) while "*return*" is labeled as **return.02** (as in *give back something*) in the sentence "*George returned to Berlin to return his hat*".
---
### Training: Script to train this model
The following Flair script was used to train this model:
```python
from flair.data import Corpus
from flair.datasets import ColumnCorpus
from flair.embeddings import BytePairEmbeddings, StackedEmbeddings, FlairEmbeddings
# 1. load the corpus (Ontonotes does not ship with Flair, you need to download and reformat into a column format yourself)
corpus = ColumnCorpus(
"resources/tasks/srl", column_format={1: "text", 11: "frame"}
)
# 2. what tag do we want to predict?
tag_type = 'frame'
# 3. make the tag dictionary from the corpus
tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)
# 4. initialize each embedding we use
embedding_types = [
BytePairEmbeddings("en"),
FlairEmbeddings("news-forward-fast"),
FlairEmbeddings("news-backward-fast"),
]
# embedding stack consists of byte-pair and Flair embeddings
embeddings = StackedEmbeddings(embeddings=embedding_types)
# 5. initialize sequence tagger
from flair.models import SequenceTagger
tagger = SequenceTagger(hidden_size=256,
embeddings=embeddings,
tag_dictionary=tag_dictionary,
tag_type=tag_type)
# 6. initialize trainer
from flair.trainers import ModelTrainer
trainer = ModelTrainer(tagger, corpus)
# 7. run training
trainer.train('resources/taggers/frame-english-fast',
train_with_dev=True,
max_epochs=150)
```
---
### Cite
Please cite the following paper when using this model.
```
@inproceedings{akbik2019flair,
title={FLAIR: An easy-to-use framework for state-of-the-art NLP},
author={Akbik, Alan and Bergmann, Tanja and Blythe, Duncan and Rasul, Kashif and Schweter, Stefan and Vollgraf, Roland},
booktitle={{NAACL} 2019, 2019 Conference of the North American Chapter of the Association for Computational Linguistics (Demonstrations)},
pages={54--59},
year={2019}
}
```
---
### Issues?
The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
|
flair/frame-english | 2021-03-02T22:02:55.000Z | [
"pytorch",
"en",
"dataset:ontonotes",
"flair",
"token-classification",
"sequence-tagger-model"
]
| token-classification | [
".gitattributes",
"README.md",
"loss.tsv",
"pytorch_model.bin",
"training.log"
]
| flair | 0 | flair | ---
tags:
- flair
- token-classification
- sequence-tagger-model
language: en
datasets:
- ontonotes
widget:
- text: "George returned to Berlin to return his hat."
---
## English Verb Disambiguation in Flair (default model)
This is the standard verb disambiguation model for English that ships with [Flair](https://github.com/flairNLP/flair/).
F1-Score: **89.34** (Ontonotes) - predicts [Proposition Bank verb frames](http://verbs.colorado.edu/propbank/framesets-english-aliases/).
Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF.
---
### Demo: How to use in Flair
Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`)
```python
from flair.data import Sentence
from flair.models import SequenceTagger
# load tagger
tagger = SequenceTagger.load("flair/frame-english")
# make example sentence
sentence = Sentence("George returned to Berlin to return his hat.")
# predict frame tags
tagger.predict(sentence)
# print sentence
print(sentence)
# print predicted frame spans
print('The following frame tags are found:')
# iterate over entities and print
for entity in sentence.get_spans('frame'):
print(entity)
```
This yields the following output:
```
Span [2]: "returned" [− Labels: return.01 (0.9951)]
Span [6]: "return" [− Labels: return.02 (0.6361)]
```
So, the word "*returned*" is labeled as **return.01** (as in *go back somewhere*) while "*return*" is labeled as **return.02** (as in *give back something*) in the sentence "*George returned to Berlin to return his hat*".
---
### Training: Script to train this model
The following Flair script was used to train this model:
```python
from flair.data import Corpus
from flair.datasets import ColumnCorpus
from flair.embeddings import BytePairEmbeddings, StackedEmbeddings, FlairEmbeddings
# 1. load the corpus (Ontonotes does not ship with Flair, you need to download and reformat into a column format yourself)
corpus = ColumnCorpus(
"resources/tasks/srl", column_format={1: "text", 11: "frame"}
)
# 2. what tag do we want to predict?
tag_type = 'frame'
# 3. make the tag dictionary from the corpus
tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)
# 4. initialize each embedding we use
embedding_types = [
BytePairEmbeddings("en"),
FlairEmbeddings("news-forward"),
FlairEmbeddings("news-backward"),
]
# embedding stack consists of byte-pair and Flair embeddings
embeddings = StackedEmbeddings(embeddings=embedding_types)
# 5. initialize sequence tagger
from flair.models import SequenceTagger
tagger = SequenceTagger(hidden_size=256,
embeddings=embeddings,
tag_dictionary=tag_dictionary,
tag_type=tag_type)
# 6. initialize trainer
from flair.trainers import ModelTrainer
trainer = ModelTrainer(tagger, corpus)
# 7. run training
trainer.train('resources/taggers/frame-english',
train_with_dev=True,
max_epochs=150)
```
---
### Cite
Please cite the following paper when using this model.
```
@inproceedings{akbik2019flair,
title={FLAIR: An easy-to-use framework for state-of-the-art NLP},
author={Akbik, Alan and Bergmann, Tanja and Blythe, Duncan and Rasul, Kashif and Schweter, Stefan and Vollgraf, Roland},
booktitle={{NAACL} 2019, 2019 Conference of the North American Chapter of the Association for Computational Linguistics (Demonstrations)},
pages={54--59},
year={2019}
}
```
---
### Issues?
The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
|
flair/ner-danish | 2021-02-26T15:33:02.000Z | [
"pytorch",
"da",
"dataset:DaNE",
"flair",
"token-classification",
"sequence-tagger-model"
]
| token-classification | [
".gitattributes",
"README.md",
"pytorch_model.bin"
]
| flair | 0 | flair | ---
tags:
- flair
- token-classification
- sequence-tagger-model
language: da
datasets:
- DaNE
widget:
- text: "Jens Peter Hansen kommer fra Danmark"
---
# Danish NER in Flair (default model)
This is the standard 4-class NER model for Danish that ships with [Flair](https://github.com/flairNLP/flair/).
F1-Score: **81.78** (DaNER)
Predicts 4 tags:
| **tag** | **meaning** |
|---------------------------------|-----------|
| PER | person name |
| LOC | location name |
| ORG | organization name |
| MISC | other name |
Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF.
---
# Demo: How to use in Flair
Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`)
```python
from flair.data import Sentence
from flair.models import SequenceTagger
# load tagger
tagger = SequenceTagger.load("flair/ner-danish")
# make example sentence
sentence = Sentence("Jens Peter Hansen kommer fra Danmark")
# predict NER tags
tagger.predict(sentence)
# print sentence
print(sentence)
# print predicted NER spans
print('The following NER tags are found:')
# iterate over entities and print
for entity in sentence.get_spans('ner'):
print(entity)
```
This yields the following output:
```
Span [1,2,3]: "Jens Peter Hansen" [− Labels: PER (0.9961)]
Span [6]: "Danmark" [− Labels: LOC (0.9816)]
```
So, the entities "*Jens Peter Hansen*" (labeled as a **person**) and "*Danmark*" (labeled as a **location**) are found in the sentence "*Jens Peter Hansen kommer fra Danmark*".
---
### Training: Script to train this model
The model was trained by the [DaNLP project](https://github.com/alexandrainst/danlp) using the [DaNE corpus](https://github.com/alexandrainst/danlp/blob/master/docs/docs/datasets.md#danish-dependency-treebank-dane-dane). Check their repo for more information.
The following Flair script may be used to train such a model:
```python
from flair.data import Corpus
from flair.datasets import DANE
from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings
# 1. get the corpus
corpus: Corpus = DANE()
# 2. what tag do we want to predict?
tag_type = 'ner'
# 3. make the tag dictionary from the corpus
tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)
# 4. initialize each embedding we use
embedding_types = [
    # Danish FastText word embeddings
WordEmbeddings('da'),
# contextual string embeddings, forward
FlairEmbeddings('da-forward'),
# contextual string embeddings, backward
FlairEmbeddings('da-backward'),
]
# embedding stack consists of FastText and Flair embeddings
embeddings = StackedEmbeddings(embeddings=embedding_types)
# 5. initialize sequence tagger
from flair.models import SequenceTagger
tagger = SequenceTagger(hidden_size=256,
embeddings=embeddings,
tag_dictionary=tag_dictionary,
tag_type=tag_type)
# 6. initialize trainer
from flair.trainers import ModelTrainer
trainer = ModelTrainer(tagger, corpus)
# 7. run training
trainer.train('resources/taggers/ner-danish',
train_with_dev=True,
max_epochs=150)
```
---
### Cite
Please cite the following papers when using this model.
```
@inproceedings{akbik-etal-2019-flair,
title = "{FLAIR}: An Easy-to-Use Framework for State-of-the-Art {NLP}",
author = "Akbik, Alan and
Bergmann, Tanja and
Blythe, Duncan and
Rasul, Kashif and
Schweter, Stefan and
Vollgraf, Roland",
booktitle = "Proceedings of the 2019 Conference of the North {A}merican Chapter of the Association for Computational Linguistics (Demonstrations)",
year = "2019",
url = "https://www.aclweb.org/anthology/N19-4010",
pages = "54--59",
}
```
And check the [DaNLP project](https://github.com/alexandrainst/danlp) for more information.
---
### Issues?
The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
|
flair/ner-dutch-large | 2021-05-08T15:36:03.000Z | [
"pytorch",
"nl",
"dataset:conll2003",
"arxiv:2011.06993",
"flair",
"token-classification",
"sequence-tagger-model"
]
| token-classification | [
".gitattributes",
"README.md",
"loss.tsv",
"pytorch_model.bin",
"training.log"
]
| flair | 0 | flair | ---
tags:
- flair
- token-classification
- sequence-tagger-model
language: nl
datasets:
- conll2003
widget:
- text: "George Washington ging naar Washington"
---
## Dutch NER in Flair (large model)
This is the large 4-class NER model for Dutch that ships with [Flair](https://github.com/flairNLP/flair/).
F1-Score: **95,25** (CoNLL-03 Dutch)
Predicts 4 tags:
| **tag** | **meaning** |
|---------------------------------|-----------|
| PER | person name |
| LOC | location name |
| ORG | organization name |
| MISC | other name |
Based on document-level XLM-R embeddings and [FLERT](https://arxiv.org/pdf/2011.06993v1.pdf/).
---
### Demo: How to use in Flair
Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`)
```python
from flair.data import Sentence
from flair.models import SequenceTagger
# load tagger
tagger = SequenceTagger.load("flair/ner-dutch-large")
# make example sentence
sentence = Sentence("George Washington ging naar Washington")
# predict NER tags
tagger.predict(sentence)
# print sentence
print(sentence)
# print predicted NER spans
print('The following NER tags are found:')
# iterate over entities and print
for entity in sentence.get_spans('ner'):
print(entity)
```
This yields the following output:
```
Span [1,2]: "George Washington" [− Labels: PER (1.0)]
Span [5]: "Washington" [− Labels: LOC (1.0)]
```
So, the entities "*George Washington*" (labeled as a **person**) and "*Washington*" (labeled as a **location**) are found in the sentence "*George Washington ging naar Washington*".
---
### Training: Script to train this model
The following Flair script was used to train this model:
```python
import torch
# 1. get the corpus
from flair.datasets import CONLL_03_DUTCH
corpus = CONLL_03_DUTCH()
# 2. what tag do we want to predict?
tag_type = 'ner'
# 3. make the tag dictionary from the corpus
tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)
# 4. initialize fine-tuneable transformer embeddings WITH document context
from flair.embeddings import TransformerWordEmbeddings
embeddings = TransformerWordEmbeddings(
model='xlm-roberta-large',
layers="-1",
subtoken_pooling="first",
fine_tune=True,
use_context=True,
)
# 5. initialize bare-bones sequence tagger (no CRF, no RNN, no reprojection)
from flair.models import SequenceTagger
tagger = SequenceTagger(
hidden_size=256,
embeddings=embeddings,
tag_dictionary=tag_dictionary,
tag_type='ner',
use_crf=False,
use_rnn=False,
reproject_embeddings=False,
)
# 6. initialize trainer with AdamW optimizer
from flair.trainers import ModelTrainer
trainer = ModelTrainer(tagger, corpus, optimizer=torch.optim.AdamW)
# 7. run training with XLM parameters (20 epochs, small LR)
from torch.optim.lr_scheduler import OneCycleLR
trainer.train('resources/taggers/ner-dutch-large',
learning_rate=5.0e-6,
mini_batch_size=4,
mini_batch_chunk_size=1,
max_epochs=20,
scheduler=OneCycleLR,
embeddings_storage_mode='none',
weight_decay=0.,
)
```
---
### Cite
Please cite the following paper when using this model.
```
@misc{schweter2020flert,
title={FLERT: Document-Level Features for Named Entity Recognition},
author={Stefan Schweter and Alan Akbik},
year={2020},
eprint={2011.06993},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
```
---
### Issues?
The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
|
flair/ner-dutch | 2021-03-02T22:03:57.000Z | [
"pytorch",
"nl",
"dataset:conll2003",
"flair",
"token-classification",
"sequence-tagger-model"
]
| token-classification | [
".gitattributes",
"README.md",
"loss.tsv",
"pytorch_model.bin",
"test.tsv",
"training.log"
]
| flair | 0 | flair | ---
tags:
- flair
- token-classification
- sequence-tagger-model
language: nl
datasets:
- conll2003
widget:
- text: "George Washington ging naar Washington."
---
# Dutch NER in Flair (default model)
This is the standard 4-class NER model for Dutch that ships with [Flair](https://github.com/flairNLP/flair/).
F1-Score: **92,58** (CoNLL-03 Dutch)
Predicts 4 tags:
| **tag** | **meaning** |
|---------------------------------|-----------|
| PER | person name |
| LOC | location name |
| ORG | organization name |
| MISC | other name |
Based on Transformer embeddings and LSTM-CRF.
---
# Demo: How to use in Flair
Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`)
```python
from flair.data import Sentence
from flair.models import SequenceTagger
# load tagger
tagger = SequenceTagger.load("flair/ner-dutch")
# make example sentence
sentence = Sentence("George Washington ging naar Washington")
# predict NER tags
tagger.predict(sentence)
# print sentence
print(sentence)
# print predicted NER spans
print('The following NER tags are found:')
# iterate over entities and print
for entity in sentence.get_spans('ner'):
print(entity)
```
This yields the following output:
```
Span [1,2]: "George Washington" [− Labels: PER (0.997)]
Span [5]: "Washington" [− Labels: LOC (0.9996)]
```
So, the entities "*George Washington*" (labeled as a **person**) and "*Washington*" (labeled as a **location**) are found in the sentence "*George Washington ging naar Washington*".
---
### Training: Script to train this model
The following Flair script was used to train this model:
```python
from flair.data import Corpus
from flair.datasets import CONLL_03_DUTCH
from flair.embeddings import TransformerWordEmbeddings
from flair.models import SequenceTagger
from flair.trainers import ModelTrainer
# 1. get the corpus
corpus: Corpus = CONLL_03_DUTCH()
# 2. what tag do we want to predict?
tag_type = 'ner'
# 3. make the tag dictionary from the corpus
tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)
# 4. initialize embeddings
embeddings = TransformerWordEmbeddings('wietsedv/bert-base-dutch-cased')
# 5. initialize sequence tagger
tagger: SequenceTagger = SequenceTagger(hidden_size=256,
embeddings=embeddings,
tag_dictionary=tag_dictionary,
tag_type=tag_type)
# 6. initialize trainer
trainer: ModelTrainer = ModelTrainer(tagger, corpus)
# 7. run training
trainer.train('resources/taggers/ner-dutch',
train_with_dev=True,
max_epochs=150)
```
---
### Cite
Please cite the following paper when using this model.
```
@inproceedings{akbik-etal-2019-flair,
title = "{FLAIR}: An Easy-to-Use Framework for State-of-the-Art {NLP}",
author = "Akbik, Alan and
Bergmann, Tanja and
Blythe, Duncan and
Rasul, Kashif and
Schweter, Stefan and
Vollgraf, Roland",
booktitle = "Proceedings of the 2019 Conference of the North {A}merican Chapter of the Association for Computational Linguistics (Demonstrations)",
year = "2019",
url = "https://www.aclweb.org/anthology/N19-4010",
pages = "54--59",
}
```
---
### Issues?
The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
|
flair/ner-english-fast | 2021-02-26T15:39:34.000Z | [
"pytorch",
"en",
"dataset:conll2003",
"flair",
"token-classification",
"sequence-tagger-model"
]
| token-classification | [
".gitattributes",
"README.md",
"loss.tsv",
"pytorch_model.bin",
"training.log"
]
| flair | 0 | flair | ---
tags:
- flair
- token-classification
- sequence-tagger-model
language: en
datasets:
- conll2003
widget:
- text: "George Washington went to Washington"
---
## English NER in Flair (fast model)
This is the fast 4-class NER model for English that ships with [Flair](https://github.com/flairNLP/flair/).
F1-Score: **92,92** (corrected CoNLL-03)
Predicts 4 tags:
| **tag** | **meaning** |
|---------------------------------|-----------|
| PER | person name |
| LOC | location name |
| ORG | organization name |
| MISC | other name |
Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF.
---
### Demo: How to use in Flair
Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`)
```python
from flair.data import Sentence
from flair.models import SequenceTagger
# load tagger
tagger = SequenceTagger.load("flair/ner-english-fast")
# make example sentence
sentence = Sentence("George Washington went to Washington")
# predict NER tags
tagger.predict(sentence)
# print sentence
print(sentence)
# print predicted NER spans
print('The following NER tags are found:')
# iterate over entities and print
for entity in sentence.get_spans('ner'):
print(entity)
```
This yields the following output:
```
Span [1,2]: "George Washington" [− Labels: PER (0.9515)]
Span [5]: "Washington" [− Labels: LOC (0.992)]
```
So, the entities "*George Washington*" (labeled as a **person**) and "*Washington*" (labeled as a **location**) are found in the sentence "*George Washington went to Washington*".
---
### Training: Script to train this model
The following Flair script was used to train this model:
```python
from flair.data import Corpus
from flair.datasets import CONLL_03
from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings
# 1. get the corpus
corpus: Corpus = CONLL_03()
# 2. what tag do we want to predict?
tag_type = 'ner'
# 3. make the tag dictionary from the corpus
tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)
# 4. initialize each embedding we use
embedding_types = [
# GloVe embeddings
WordEmbeddings('glove'),
# contextual string embeddings, forward
FlairEmbeddings('news-forward-fast'),
# contextual string embeddings, backward
FlairEmbeddings('news-backward-fast'),
]
# embedding stack consists of Flair and GloVe embeddings
embeddings = StackedEmbeddings(embeddings=embedding_types)
# 5. initialize sequence tagger
from flair.models import SequenceTagger
tagger = SequenceTagger(hidden_size=256,
embeddings=embeddings,
tag_dictionary=tag_dictionary,
tag_type=tag_type)
# 6. initialize trainer
from flair.trainers import ModelTrainer
trainer = ModelTrainer(tagger, corpus)
# 7. run training
trainer.train('resources/taggers/ner-english',
train_with_dev=True,
max_epochs=150)
```
---
### Cite
Please cite the following paper when using this model.
```
@inproceedings{akbik2018coling,
title={Contextual String Embeddings for Sequence Labeling},
author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland},
booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics},
pages = {1638--1649},
year = {2018}
}
```
---
### Issues?
The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
|
flair/ner-english-large | 2021-05-08T15:36:27.000Z | [
"pytorch",
"en",
"dataset:conll2003",
"arxiv:2011.06993",
"flair",
"token-classification",
"sequence-tagger-model"
]
| token-classification | [
".gitattributes",
"README.md",
"loss.tsv",
"pytorch_model.bin",
"training.log"
]
| flair | 0 | flair | ---
tags:
- flair
- token-classification
- sequence-tagger-model
language: en
datasets:
- conll2003
widget:
- text: "George Washington went to Washington"
---
## English NER in Flair (large model)
This is the large 4-class NER model for English that ships with [Flair](https://github.com/flairNLP/flair/).
F1-Score: **94,36** (corrected CoNLL-03)
Predicts 4 tags:
| **tag** | **meaning** |
|---------------------------------|-----------|
| PER | person name |
| LOC | location name |
| ORG | organization name |
| MISC | other name |
Based on document-level XLM-R embeddings and [FLERT](https://arxiv.org/pdf/2011.06993v1.pdf/).
---
### Demo: How to use in Flair
Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`)
```python
from flair.data import Sentence
from flair.models import SequenceTagger
# load tagger
tagger = SequenceTagger.load("flair/ner-english-large")
# make example sentence
sentence = Sentence("George Washington went to Washington")
# predict NER tags
tagger.predict(sentence)
# print sentence
print(sentence)
# print predicted NER spans
print('The following NER tags are found:')
# iterate over entities and print
for entity in sentence.get_spans('ner'):
print(entity)
```
This yields the following output:
```
Span [1,2]: "George Washington" [− Labels: PER (1.0)]
Span [5]: "Washington" [− Labels: LOC (1.0)]
```
So, the entities "*George Washington*" (labeled as a **person**) and "*Washington*" (labeled as a **location**) are found in the sentence "*George Washington went to Washington*".
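The XLM-R based large model is considerably slower than the Flair-embedding models when run on CPU. If a GPU is available, you can point Flair at it before loading the tagger. This is only a sketch and assumes a working CUDA setup:
```python
import torch
import flair

# use the first GPU if one is available (assumption: CUDA is installed and visible)
if torch.cuda.is_available():
    flair.device = torch.device("cuda:0")

from flair.models import SequenceTagger
tagger = SequenceTagger.load("flair/ner-english-large")
```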
---
### Training: Script to train this model
The following Flair script was used to train this model:
```python
import torch
# 1. get the corpus
from flair.datasets import CONLL_03
corpus = CONLL_03()
# 2. what tag do we want to predict?
tag_type = 'ner'
# 3. make the tag dictionary from the corpus
tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)
# 4. initialize fine-tuneable transformer embeddings WITH document context
from flair.embeddings import TransformerWordEmbeddings
embeddings = TransformerWordEmbeddings(
model='xlm-roberta-large',
layers="-1",
subtoken_pooling="first",
fine_tune=True,
use_context=True,
)
# 5. initialize bare-bones sequence tagger (no CRF, no RNN, no reprojection)
from flair.models import SequenceTagger
tagger = SequenceTagger(
hidden_size=256,
embeddings=embeddings,
tag_dictionary=tag_dictionary,
tag_type='ner',
use_crf=False,
use_rnn=False,
reproject_embeddings=False,
)
# 6. initialize trainer with AdamW optimizer
from flair.trainers import ModelTrainer
trainer = ModelTrainer(tagger, corpus, optimizer=torch.optim.AdamW)
# 7. run training with XLM parameters (20 epochs, small LR)
from torch.optim.lr_scheduler import OneCycleLR
trainer.train('resources/taggers/ner-english-large',
learning_rate=5.0e-6,
mini_batch_size=4,
mini_batch_chunk_size=1,
max_epochs=20,
scheduler=OneCycleLR,
embeddings_storage_mode='none',
weight_decay=0.,
)
```
---
### Cite
Please cite the following paper when using this model.
```
@misc{schweter2020flert,
title={FLERT: Document-Level Features for Named Entity Recognition},
author={Stefan Schweter and Alan Akbik},
year={2020},
eprint={2011.06993},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
```
---
### Issues?
The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
|
flair/ner-english-ontonotes-fast | 2021-03-02T22:05:17.000Z | [
"pytorch",
"en",
"dataset:ontonotes",
"flair",
"token-classification",
"sequence-tagger-model"
]
| token-classification | [
".gitattributes",
"README.md",
"loss.tsv",
"pytorch_model.bin"
]
| flair | 0 | flair | ---
tags:
- flair
- token-classification
- sequence-tagger-model
language: en
datasets:
- ontonotes
widget:
- text: "On September 1st George Washington won 1 dollar."
---
## English NER in Flair (Ontonotes fast model)
This is the fast version of the 18-class NER model for English that ships with [Flair](https://github.com/flairNLP/flair/).
F1-Score: **89.3** (Ontonotes)
Predicts 18 tags:
| **tag** | **meaning** |
|---------------------------------|-----------|
| CARDINAL | cardinal value |
| DATE | date value |
| EVENT | event name |
| FAC | building name |
| GPE | geo-political entity |
| LANGUAGE | language name |
| LAW | law name |
| LOC | location name |
| MONEY | money name |
| NORP | affiliation |
| ORDINAL | ordinal value |
| ORG | organization name |
| PERCENT | percent value |
| PERSON | person name |
| PRODUCT | product name |
| QUANTITY | quantity value |
| TIME | time value |
| WORK_OF_ART | name of work of art |
Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF.
---
### Demo: How to use in Flair
Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`)
```python
from flair.data import Sentence
from flair.models import SequenceTagger
# load tagger
tagger = SequenceTagger.load("flair/ner-english-ontonotes-fast")
# make example sentence
sentence = Sentence("On September 1st George Washington won 1 dollar.")
# predict NER tags
tagger.predict(sentence)
# print sentence
print(sentence)
# print predicted NER spans
print('The following NER tags are found:')
# iterate over entities and print
for entity in sentence.get_spans('ner'):
print(entity)
```
This yields the following output:
```
Span [2,3]: "September 1st" [− Labels: DATE (0.9655)]
Span [4,5]: "George Washington" [− Labels: PERSON (0.8243)]
Span [7,8]: "1 dollar" [− Labels: MONEY (0.8022)]
```
So, the entities "*September 1st*" (labeled as a **date**), "*George Washington*" (labeled as a **person**) and "*1 dollar*" (labeled as a **money**) are found in the sentence "*On September 1st George Washington won 1 dollar*".
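With 18 entity types it is often convenient to group the predictions by tag. The snippet below is a small sketch (not part of the original card) that reuses the tagged `sentence` from the demo above and builds a dictionary mapping each tag to the entity texts it was assigned:
```python
from collections import defaultdict

# group predicted entities by their tag
entities_by_tag = defaultdict(list)
for span in sentence.get_spans('ner'):
    for label in span.labels:
        entities_by_tag[label.value].append(span.text)

print(dict(entities_by_tag))
# expected to look like: {'DATE': ['September 1st'], 'PERSON': ['George Washington'], 'MONEY': ['1 dollar']}
```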
---
### Training: Script to train this model
The following Flair script was used to train this model:
```python
from flair.data import Corpus
from flair.datasets import ColumnCorpus
from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings
# 1. load the corpus (Ontonotes does not ship with Flair, you need to download and reformat into a column format yourself)
corpus: Corpus = ColumnCorpus(
"resources/tasks/onto-ner",
column_format={0: "text", 1: "pos", 2: "upos", 3: "ner"},
tag_to_bioes="ner",
)
# 2. what tag do we want to predict?
tag_type = 'ner'
# 3. make the tag dictionary from the corpus
tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)
# 4. initialize each embedding we use
embedding_types = [
    # FastText embeddings (crawl)
WordEmbeddings('en-crawl'),
# contextual string embeddings, forward
FlairEmbeddings('news-forward-fast'),
# contextual string embeddings, backward
FlairEmbeddings('news-backward-fast'),
]
# embedding stack consists of FastText and Flair embeddings
embeddings = StackedEmbeddings(embeddings=embedding_types)
# 5. initialize sequence tagger
from flair.models import SequenceTagger
tagger = SequenceTagger(hidden_size=256,
embeddings=embeddings,
tag_dictionary=tag_dictionary,
tag_type=tag_type)
# 6. initialize trainer
from flair.trainers import ModelTrainer
trainer = ModelTrainer(tagger, corpus)
# 7. run training
trainer.train('resources/taggers/ner-english-ontonotes-fast',
train_with_dev=True,
max_epochs=150)
```
---
### Cite
Please cite the following paper when using this model.
```
@inproceedings{akbik2018coling,
title={Contextual String Embeddings for Sequence Labeling},
author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland},
booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics},
pages = {1638--1649},
year = {2018}
}
```
---
### Issues?
The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
|
flair/ner-english-ontonotes-large | 2021-05-08T15:35:21.000Z | [
"pytorch",
"en",
"dataset:ontonotes",
"arxiv:2011.06993",
"flair",
"token-classification",
"sequence-tagger-model"
]
| token-classification | [
".gitattributes",
"README.md",
"loss.tsv",
"pytorch_model.bin",
"training.log"
]
| flair | 0 | flair | ---
tags:
- flair
- token-classification
- sequence-tagger-model
language: en
datasets:
- ontonotes
widget:
- text: "On September 1st George won 1 dollar while watching Game of Thrones."
---
## English NER in Flair (Ontonotes large model)
This is the large 18-class NER model for English that ships with [Flair](https://github.com/flairNLP/flair/).
F1-Score: **90.93** (Ontonotes)
Predicts 18 tags:
| **tag** | **meaning** |
|---------------------------------|-----------|
| CARDINAL | cardinal value |
| DATE | date value |
| EVENT | event name |
| FAC | building name |
| GPE | geo-political entity |
| LANGUAGE | language name |
| LAW | law name |
| LOC | location name |
| MONEY | money name |
| NORP | affiliation |
| ORDINAL | ordinal value |
| ORG | organization name |
| PERCENT | percent value |
| PERSON | person name |
| PRODUCT | product name |
| QUANTITY | quantity value |
| TIME | time value |
| WORK_OF_ART | name of work of art |
Based on document-level XLM-R embeddings and [FLERT](https://arxiv.org/pdf/2011.06993v1.pdf/).
---
### Demo: How to use in Flair
Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`)
```python
from flair.data import Sentence
from flair.models import SequenceTagger
# load tagger
tagger = SequenceTagger.load("flair/ner-english-ontonotes-large")
# make example sentence
sentence = Sentence("On September 1st George won 1 dollar while watching Game of Thrones.")
# predict NER tags
tagger.predict(sentence)
# print sentence
print(sentence)
# print predicted NER spans
print('The following NER tags are found:')
# iterate over entities and print
for entity in sentence.get_spans('ner'):
print(entity)
```
This yields the following output:
```
Span [2,3]: "September 1st" [− Labels: DATE (1.0)]
Span [4]: "George" [− Labels: PERSON (1.0)]
Span [6,7]: "1 dollar" [− Labels: MONEY (1.0)]
Span [10,11,12]: "Game of Thrones" [− Labels: WORK_OF_ART (1.0)]
```
So, the entities "*September 1st*" (labeled as a **date**), "*George*" (labeled as a **person**), "*1 dollar*" (labeled as a **money**) and "*Game of Thrones*" (labeled as a **work of art**) are found in the sentence "*On September 1st George won 1 dollar while watching Game of Thrones*".
---
### Training: Script to train this model
The following Flair script was used to train this model:
```python
import torch

from flair.data import Corpus
from flair.datasets import ColumnCorpus
# 1. load the corpus (Ontonotes does not ship with Flair, you need to download and reformat into a column format yourself)
corpus: Corpus = ColumnCorpus(
"resources/tasks/onto-ner",
column_format={0: "text", 1: "pos", 2: "upos", 3: "ner"},
tag_to_bioes="ner",
)
# 2. what tag do we want to predict?
tag_type = 'ner'
# 3. make the tag dictionary from the corpus
tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)
# 4. initialize fine-tuneable transformer embeddings WITH document context
from flair.embeddings import TransformerWordEmbeddings
embeddings = TransformerWordEmbeddings(
model='xlm-roberta-large',
layers="-1",
subtoken_pooling="first",
fine_tune=True,
use_context=True,
)
# 5. initialize bare-bones sequence tagger (no CRF, no RNN, no reprojection)
from flair.models import SequenceTagger
tagger = SequenceTagger(
hidden_size=256,
embeddings=embeddings,
tag_dictionary=tag_dictionary,
tag_type='ner',
use_crf=False,
use_rnn=False,
reproject_embeddings=False,
)
# 6. initialize trainer with AdamW optimizer
from flair.trainers import ModelTrainer
trainer = ModelTrainer(tagger, corpus, optimizer=torch.optim.AdamW)
# 7. run training with XLM parameters (20 epochs, small LR)
from torch.optim.lr_scheduler import OneCycleLR
trainer.train('resources/taggers/ner-english-ontonotes-large',
learning_rate=5.0e-6,
mini_batch_size=4,
mini_batch_chunk_size=1,
max_epochs=20,
scheduler=OneCycleLR,
embeddings_storage_mode='none',
weight_decay=0.,
)
```
---
### Cite
Please cite the following paper when using this model.
```
@misc{schweter2020flert,
title={FLERT: Document-Level Features for Named Entity Recognition},
author={Stefan Schweter and Alan Akbik},
year={2020},
eprint={2011.06993},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
```
---
### Issues?
The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
|
flair/ner-english-ontonotes | 2021-03-02T22:07:31.000Z | [
"pytorch",
"en",
"dataset:ontonotes",
"flair",
"token-classification",
"sequence-tagger-model"
]
| token-classification | [
".gitattributes",
"README.md",
"loss.tsv",
"pytorch_model.bin",
"training.log"
]
| flair | 0 | flair | ---
tags:
- flair
- token-classification
- sequence-tagger-model
language: en
datasets:
- ontonotes
widget:
- text: "On September 1st George Washington won 1 dollar."
---
## English NER in Flair (Ontonotes default model)
This is the 18-class NER model for English that ships with [Flair](https://github.com/flairNLP/flair/).
F1-Score: **89.27** (Ontonotes)
Predicts 18 tags:
| **tag** | **meaning** |
|---------------------------------|-----------|
| CARDINAL | cardinal value |
| DATE | date value |
| EVENT | event name |
| FAC | building name |
| GPE | geo-political entity |
| LANGUAGE | language name |
| LAW | law name |
| LOC | location name |
| MONEY | money name |
| NORP | affiliation |
| ORDINAL | ordinal value |
| ORG | organization name |
| PERCENT | percent value |
| PERSON | person name |
| PRODUCT | product name |
| QUANTITY | quantity value |
| TIME | time value |
| WORK_OF_ART | name of work of art |
Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF.
---
### Demo: How to use in Flair
Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`)
```python
from flair.data import Sentence
from flair.models import SequenceTagger
# load tagger
tagger = SequenceTagger.load("flair/ner-english-ontonotes")
# make example sentence
sentence = Sentence("On September 1st George Washington won 1 dollar.")
# predict NER tags
tagger.predict(sentence)
# print sentence
print(sentence)
# print predicted NER spans
print('The following NER tags are found:')
# iterate over entities and print
for entity in sentence.get_spans('ner'):
print(entity)
```
This yields the following output:
```
Span [2,3]: "September 1st" [− Labels: DATE (0.8824)]
Span [4,5]: "George Washington" [− Labels: PERSON (0.9604)]
Span [7,8]: "1 dollar" [− Labels: MONEY (0.9837)]
```
So, the entities "*September 1st*" (labeled as a **date**), "*George Washington*" (labeled as a **person**) and "*1 dollar*" (labeled as a **money**) are found in the sentence "*On September 1st George Washington won 1 dollar*".
---
### Training: Script to train this model
The following Flair script was used to train this model:
```python
from flair.data import Corpus
from flair.datasets import ColumnCorpus
from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings
# 1. load the corpus (Ontonotes does not ship with Flair, you need to download and reformat into a column format yourself)
corpus: Corpus = ColumnCorpus(
"resources/tasks/onto-ner",
column_format={0: "text", 1: "pos", 2: "upos", 3: "ner"},
tag_to_bioes="ner",
)
# 2. what tag do we want to predict?
tag_type = 'ner'
# 3. make the tag dictionary from the corpus
tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)
# 4. initialize each embedding we use
embedding_types = [
    # FastText embeddings (crawl)
WordEmbeddings('en-crawl'),
# contextual string embeddings, forward
FlairEmbeddings('news-forward'),
# contextual string embeddings, backward
FlairEmbeddings('news-backward'),
]
# embedding stack consists of FastText and Flair embeddings
embeddings = StackedEmbeddings(embeddings=embedding_types)
# 5. initialize sequence tagger
from flair.models import SequenceTagger
tagger = SequenceTagger(hidden_size=256,
embeddings=embeddings,
tag_dictionary=tag_dictionary,
tag_type=tag_type)
# 6. initialize trainer
from flair.trainers import ModelTrainer
trainer = ModelTrainer(tagger, corpus)
# 7. run training
trainer.train('resources/taggers/ner-english-ontonotes',
train_with_dev=True,
max_epochs=150)
```
---
### Cite
Please cite the following paper when using this model.
```
@inproceedings{akbik2018coling,
title={Contextual String Embeddings for Sequence Labeling},
author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland},
booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics},
pages = {1638--1649},
year = {2018}
}
```
---
### Issues?
The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
|
flair/ner-english | 2021-03-02T22:11:28.000Z | [
"pytorch",
"en",
"dataset:conll2003",
"flair",
"token-classification",
"sequence-tagger-model"
]
| token-classification | [
".gitattributes",
"README.md",
"loss.tsv",
"pytorch_model.bin",
"test.tsv",
"training.log"
]
| flair | 0 | flair | ---
tags:
- flair
- token-classification
- sequence-tagger-model
language: en
datasets:
- conll2003
widget:
- text: "George Washington went to Washington"
---
## English NER in Flair (default model)
This is the standard 4-class NER model for English that ships with [Flair](https://github.com/flairNLP/flair/).
F1-Score: **93,06** (corrected CoNLL-03)
Predicts 4 tags:
| **tag** | **meaning** |
|---------------------------------|-----------|
| PER | person name |
| LOC | location name |
| ORG | organization name |
| MISC | other name |
Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF.
---
### Demo: How to use in Flair
Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`)
```python
from flair.data import Sentence
from flair.models import SequenceTagger
# load tagger
tagger = SequenceTagger.load("flair/ner-english")
# make example sentence
sentence = Sentence("George Washington went to Washington")
# predict NER tags
tagger.predict(sentence)
# print sentence
print(sentence)
# print predicted NER spans
print('The following NER tags are found:')
# iterate over entities and print
for entity in sentence.get_spans('ner'):
print(entity)
```
This yields the following output:
```
Span [1,2]: "George Washington" [− Labels: PER (0.9968)]
Span [5]: "Washington" [− Labels: LOC (0.9994)]
```
So, the entities "*George Washington*" (labeled as a **person**) and "*Washington*" (labeled as a **location**) are found in the sentence "*George Washington went to Washington*".
---
### Training: Script to train this model
The following Flair script was used to train this model:
```python
from flair.data import Corpus
from flair.datasets import CONLL_03
from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings
# 1. get the corpus
corpus: Corpus = CONLL_03()
# 2. what tag do we want to predict?
tag_type = 'ner'
# 3. make the tag dictionary from the corpus
tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)
# 4. initialize each embedding we use
embedding_types = [
# GloVe embeddings
WordEmbeddings('glove'),
# contextual string embeddings, forward
FlairEmbeddings('news-forward'),
# contextual string embeddings, backward
FlairEmbeddings('news-backward'),
]
# embedding stack consists of Flair and GloVe embeddings
embeddings = StackedEmbeddings(embeddings=embedding_types)
# 5. initialize sequence tagger
from flair.models import SequenceTagger
tagger = SequenceTagger(hidden_size=256,
embeddings=embeddings,
tag_dictionary=tag_dictionary,
tag_type=tag_type)
# 6. initialize trainer
from flair.trainers import ModelTrainer
trainer = ModelTrainer(tagger, corpus)
# 7. run training
trainer.train('resources/taggers/ner-english',
train_with_dev=True,
max_epochs=150)
```
---
### Cite
Please cite the following paper when using this model.
```
@inproceedings{akbik2018coling,
title={Contextual String Embeddings for Sequence Labeling},
author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland},
booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics},
pages = {1638--1649},
year = {2018}
}
```
---
### Issues?
The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
|
flair/ner-french | 2021-02-26T15:43:57.000Z | [
"pytorch",
"fr",
"dataset:conll2003",
"flair",
"token-classification",
"sequence-tagger-model"
]
| token-classification | [
".gitattributes",
"README.md",
"loss.tsv",
"pytorch_model.bin",
"test.tsv",
"training.log"
]
| flair | 0 | flair | ---
tags:
- flair
- token-classification
- sequence-tagger-model
language: fr
datasets:
- conll2003
widget:
- text: "George Washington est allé à Washington"
---
## French NER in Flair (default model)
This is the standard 4-class NER model for French that ships with [Flair](https://github.com/flairNLP/flair/).
F1-Score: **90,61** (WikiNER)
Predicts 4 tags:
| **tag** | **meaning** |
|---------------------------------|-----------|
| PER | person name |
| LOC | location name |
| ORG | organization name |
| MISC | other name |
Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF.
---
### Demo: How to use in Flair
Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`)
```python
from flair.data import Sentence
from flair.models import SequenceTagger
# load tagger
tagger = SequenceTagger.load("flair/ner-french")
# make example sentence
sentence = Sentence("George Washington est allé à Washington")
# predict NER tags
tagger.predict(sentence)
# print sentence
print(sentence)
# print predicted NER spans
print('The following NER tags are found:')
# iterate over entities and print
for entity in sentence.get_spans('ner'):
print(entity)
```
This yields the following output:
```
Span [1,2]: "George Washington" [− Labels: PER (0.7394)]
Span [6]: "Washington" [− Labels: LOC (0.9161)]
```
So, the entities "*George Washington*" (labeled as a **person**) and "*Washington*" (labeled as a **location**) are found in the sentence "*George Washington est allé à Washington*".
---
### Training: Script to train this model
The following Flair script was used to train this model:
```python
from flair.data import Corpus
from flair.datasets import WIKINER_FRENCH
from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings
# 1. get the corpus
corpus: Corpus = WIKINER_FRENCH()
# 2. what tag do we want to predict?
tag_type = 'ner'
# 3. make the tag dictionary from the corpus
tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)
# 4. initialize each embedding we use
embedding_types = [
    # French FastText word embeddings
WordEmbeddings('fr'),
# contextual string embeddings, forward
FlairEmbeddings('fr-forward'),
# contextual string embeddings, backward
FlairEmbeddings('fr-backward'),
]
# embedding stack consists of FastText and Flair embeddings
embeddings = StackedEmbeddings(embeddings=embedding_types)
# 5. initialize sequence tagger
from flair.models import SequenceTagger
tagger = SequenceTagger(hidden_size=256,
embeddings=embeddings,
tag_dictionary=tag_dictionary,
tag_type=tag_type)
# 6. initialize trainer
from flair.trainers import ModelTrainer
trainer = ModelTrainer(tagger, corpus)
# 7. run training
trainer.train('resources/taggers/ner-french',
train_with_dev=True,
max_epochs=150)
```
---
### Cite
Please cite the following paper when using this model.
```
@inproceedings{akbik2018coling,
title={Contextual String Embeddings for Sequence Labeling},
author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland},
booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics},
pages = {1638--1649},
year = {2018}
}
```
---
### Issues?
The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
|
flair/ner-german-large | 2021-05-08T15:36:43.000Z | [
"pytorch",
"de",
"dataset:conll2003",
"arxiv:2011.06993",
"flair",
"token-classification",
"sequence-tagger-model"
]
| token-classification | [
".gitattributes",
"README.md",
"loss.tsv",
"pytorch_model.bin",
"training.log"
]
| flair | 0 | flair | ---
tags:
- flair
- token-classification
- sequence-tagger-model
language: de
datasets:
- conll2003
widget:
- text: "George Washington ging nach Washington"
---
## German NER in Flair (large model)
This is the large 4-class NER model for German that ships with [Flair](https://github.com/flairNLP/flair/).
F1-Score: **92,31** (CoNLL-03 German revised)
Predicts 4 tags:
| **tag** | **meaning** |
|---------------------------------|-----------|
| PER | person name |
| LOC | location name |
| ORG | organization name |
| MISC | other name |
Based on document-level XLM-R embeddings and [FLERT](https://arxiv.org/pdf/2011.06993v1.pdf/).
---
### Demo: How to use in Flair
Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`)
```python
from flair.data import Sentence
from flair.models import SequenceTagger
# load tagger
tagger = SequenceTagger.load("flair/ner-german-large")
# make example sentence
sentence = Sentence("George Washington ging nach Washington")
# predict NER tags
tagger.predict(sentence)
# print sentence
print(sentence)
# print predicted NER spans
print('The following NER tags are found:')
# iterate over entities and print
for entity in sentence.get_spans('ner'):
print(entity)
```
This yields the following output:
```
Span [1,2]: "George Washington" [− Labels: PER (1.0)]
Span [5]: "Washington" [− Labels: LOC (1.0)]
```
So, the entities "*George Washington*" (labeled as a **person**) and "*Washington*" (labeled as a **location**) are found in the sentence "*George Washington ging nach Washington*".
---
### Training: Script to train this model
The following Flair script was used to train this model:
```python
import torch
# 1. get the corpus
from flair.datasets import CONLL_03_GERMAN
corpus = CONLL_03_GERMAN()
# 2. what tag do we want to predict?
tag_type = 'ner'
# 3. make the tag dictionary from the corpus
tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)
# 4. initialize fine-tuneable transformer embeddings WITH document context
from flair.embeddings import TransformerWordEmbeddings
embeddings = TransformerWordEmbeddings(
model='xlm-roberta-large',
layers="-1",
subtoken_pooling="first",
fine_tune=True,
use_context=True,
)
# 5. initialize bare-bones sequence tagger (no CRF, no RNN, no reprojection)
from flair.models import SequenceTagger
tagger = SequenceTagger(
hidden_size=256,
embeddings=embeddings,
tag_dictionary=tag_dictionary,
tag_type='ner',
use_crf=False,
use_rnn=False,
reproject_embeddings=False,
)
# 6. initialize trainer with AdamW optimizer
from flair.trainers import ModelTrainer
trainer = ModelTrainer(tagger, corpus, optimizer=torch.optim.AdamW)
# 7. run training with XLM parameters (20 epochs, small LR)
from torch.optim.lr_scheduler import OneCycleLR
trainer.train('resources/taggers/ner-german-large',
learning_rate=5.0e-6,
mini_batch_size=4,
mini_batch_chunk_size=1,
max_epochs=20,
scheduler=OneCycleLR,
embeddings_storage_mode='none',
weight_decay=0.,
)
```
---
### Cite
Please cite the following paper when using this model.
```
@misc{schweter2020flert,
title={FLERT: Document-Level Features for Named Entity Recognition},
author={Stefan Schweter and Alan Akbik},
year={2020},
eprint={2011.06993},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
```
---
### Issues?
The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
|
flair/ner-german-legal | 2021-02-26T15:40:55.000Z | [
"pytorch",
"de",
"dataset:legal",
"flair",
"token-classification",
"sequence-tagger-model"
]
| token-classification | [
".gitattributes",
"README.md",
"loss.tsv",
"pytorch_model.bin",
"training.log"
]
| flair | 0 | flair | ---
tags:
- flair
- token-classification
- sequence-tagger-model
language: de
datasets:
- legal
widget:
- text: "Herr W. verstieß gegen § 36 Abs. 7 IfSG."
---
## NER for German Legal Text in Flair (default model)
This is the legal NER model for German that ships with [Flair](https://github.com/flairNLP/flair/).
F1-Score: **96,35** (LER German dataset)
Predicts 19 tags:
| **tag** | **meaning** |
|---------------------------------|-----------|
| AN | lawyer (Anwalt) |
| EUN | European legal norm (Europäische Norm) |
| GS | law / statute (Gesetz) |
| GRT | court (Gericht) |
| INN | institution (Institution) |
| LD | country (Land) |
| LDS | landscape / region (Landschaft) |
| LIT | legal literature (Literatur) |
| MRK | brand / trademark (Marke) |
| ORG | organization (Organisation) |
| PER | person (Person) |
| RR | judge (Richter) |
| RS | court decision (Rechtsprechung) |
| ST | city (Stadt) |
| STR | street (Straße) |
| UN | company (Unternehmen) |
| VO | regulation (Verordnung) |
| VS | provision (Vorschrift) |
| VT | contract (Vertrag) |
Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF.
More details on the Legal NER dataset [here](https://github.com/elenanereiss/Legal-Entity-Recognition)
---
### Demo: How to use in Flair
Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`)
```python
from flair.data import Sentence
from flair.models import SequenceTagger
# load tagger
tagger = SequenceTagger.load("flair/ner-german-legal")
# make example sentence (don't use tokenizer since Rechtstexte are badly handled)
sentence = Sentence("Herr W. verstieß gegen § 36 Abs. 7 IfSG.", use_tokenizer=False)
# predict NER tags
tagger.predict(sentence)
# print sentence
print(sentence)
# print predicted NER spans
print('The following NER tags are found:')
# iterate over entities and print
for entity in sentence.get_spans('ner'):
print(entity)
```
This yields the following output:
```
Span [2]: "W." [− Labels: PER (0.9911)]
Span [5,6,7,8,9]: "§ 36 Abs. 7 IfSG." [− Labels: GS (0.5353)]
```
So, the entities "*W.*" (labeled as a **person**) and "*§ 36 Abs. 7 IfSG*" (labeled as a **Gesetz**, i.e. a law) are found in the sentence "*Herr W. verstieß gegen § 36 Abs. 7 IfSG.*".
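Legal documents are usually longer than a single sentence. One way to apply the tagger to a whole document is to split it into sentences first and predict in batches. The sketch below uses Flair's general-purpose `SegtokSentenceSplitter`; like the default tokenizer, it may not be ideal for Rechtstexte with many abbreviations, so treat it as a starting point rather than the recommended setup:
```python
from flair.models import SequenceTagger
from flair.tokenization import SegtokSentenceSplitter

# load tagger and a general-purpose sentence splitter
tagger = SequenceTagger.load("flair/ner-german-legal")
splitter = SegtokSentenceSplitter()

document = "Herr W. verstieß gegen § 36 Abs. 7 IfSG. Das Landgericht wies die Klage ab."

# split the document into sentences and tag them in batches
sentences = splitter.split(document)
tagger.predict(sentences, mini_batch_size=8)

for sentence in sentences:
    for span in sentence.get_spans('ner'):
        print(span)
```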
---
### Training: Script to train this model
The following Flair script was used to train this model:
```python
from flair.data import Corpus
from flair.datasets import LER_GERMAN
from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings
# 1. get the corpus
corpus: Corpus = LER_GERMAN()
# 2. what tag do we want to predict?
tag_type = 'ner'
# 3. make the tag dictionary from the corpus
tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)
# 4. initialize each embedding we use
embedding_types = [
    # German FastText word embeddings
WordEmbeddings('de'),
# contextual string embeddings, forward
FlairEmbeddings('de-forward'),
# contextual string embeddings, backward
FlairEmbeddings('de-backward'),
]
# embedding stack consists of FastText and Flair embeddings
embeddings = StackedEmbeddings(embeddings=embedding_types)
# 5. initialize sequence tagger
from flair.models import SequenceTagger
tagger = SequenceTagger(hidden_size=256,
embeddings=embeddings,
tag_dictionary=tag_dictionary,
tag_type=tag_type)
# 6. initialize trainer
from flair.trainers import ModelTrainer
trainer = ModelTrainer(tagger, corpus)
# 7. run training
trainer.train('resources/taggers/ner-german-legal',
train_with_dev=True,
max_epochs=150)
```
---
### Cite
Please cite the following papers when using this model.
```
@inproceedings{leitner2019fine,
author = {Elena Leitner and Georg Rehm and Julian Moreno-Schneider},
title = {{Fine-grained Named Entity Recognition in Legal Documents}},
booktitle = {Semantic Systems. The Power of AI and Knowledge
Graphs. Proceedings of the 15th International Conference
(SEMANTiCS 2019)},
year = 2019,
pages = {272--287},
pdf = {https://link.springer.com/content/pdf/10.1007%2F978-3-030-33220-4_20.pdf}}
```
```
@inproceedings{akbik2018coling,
title={Contextual String Embeddings for Sequence Labeling},
author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland},
booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics},
pages = {1638--1649},
year = {2018}
}
```
---
### Issues?
The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
|
flair/ner-german | 2021-02-26T15:38:47.000Z | [
"pytorch",
"de",
"dataset:conll2003",
"flair",
"token-classification",
"sequence-tagger-model"
]
| token-classification | [
".gitattributes",
"README.md",
"loss.tsv",
"pytorch_model.bin",
"test.tsv",
"training.log"
]
| flair | 0 | flair | ---
tags:
- flair
- token-classification
- sequence-tagger-model
language: de
datasets:
- conll2003
widget:
- text: "George Washington ging nach Washington"
---
## German NER in Flair (default model)
This is the standard 4-class NER model for German that ships with [Flair](https://github.com/flairNLP/flair/).
F1-Score: **87,94** (CoNLL-03 German revised)
Predicts 4 tags:
| **tag** | **meaning** |
|---------------------------------|-----------|
| PER | person name |
| LOC | location name |
| ORG | organization name |
| MISC | other name |
Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF.
---
### Demo: How to use in Flair
Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`)
```python
from flair.data import Sentence
from flair.models import SequenceTagger
# load tagger
tagger = SequenceTagger.load("flair/ner-german")
# make example sentence
sentence = Sentence("George Washington ging nach Washington")
# predict NER tags
tagger.predict(sentence)
# print sentence
print(sentence)
# print predicted NER spans
print('The following NER tags are found:')
# iterate over entities and print
for entity in sentence.get_spans('ner'):
print(entity)
```
This yields the following output:
```
Span [1,2]: "George Washington" [− Labels: PER (0.9977)]
Span [5]: "Washington" [− Labels: LOC (0.9895)]
```
So, the entities "*George Washington*" (labeled as a **person**) and "*Washington*" (labeled as a **location**) are found in the sentence "*George Washington ging nach Washington*".
---
### Training: Script to train this model
The following Flair script was used to train this model:
```python
from flair.data import Corpus
from flair.datasets import CONLL_03_GERMAN
from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings
# 1. get the corpus
corpus: Corpus = CONLL_03_GERMAN()
# 2. what tag do we want to predict?
tag_type = 'ner'
# 3. make the tag dictionary from the corpus
tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)
# 4. initialize each embedding we use
embedding_types = [
    # German FastText word embeddings
WordEmbeddings('de'),
# contextual string embeddings, forward
FlairEmbeddings('de-forward'),
# contextual string embeddings, backward
FlairEmbeddings('de-backward'),
]
# embedding stack consists of FastText and Flair embeddings
embeddings = StackedEmbeddings(embeddings=embedding_types)
# 5. initialize sequence tagger
from flair.models import SequenceTagger
tagger = SequenceTagger(hidden_size=256,
embeddings=embeddings,
tag_dictionary=tag_dictionary,
tag_type=tag_type)
# 6. initialize trainer
from flair.trainers import ModelTrainer
trainer = ModelTrainer(tagger, corpus)
# 7. run training
trainer.train('resources/taggers/ner-german',
train_with_dev=True,
max_epochs=150)
```
---
### Cite
Please cite the following paper when using this model.
```
@inproceedings{akbik2018coling,
title={Contextual String Embeddings for Sequence Labeling},
author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland},
booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics},
pages = {1638--1649},
year = {2018}
}
```
---
### Issues?
The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
|
flair/ner-multi-fast | 2021-03-02T22:14:04.000Z | [
"pytorch",
"en de nl es",
"dataset:conll2003",
"flair",
"token-classification",
"sequence-tagger-model"
]
| token-classification | [
".gitattributes",
"README.md",
"loss.tsv",
"pytorch_model.bin",
"training.log"
]
| flair | 0 | flair | ---
tags:
- flair
- token-classification
- sequence-tagger-model
language: en de nl es
datasets:
- conll2003
widget:
- text: "George Washington ging nach Washington"
---
## 4-Language NER in Flair (English, German, Dutch and Spanish)
This is the fast 4-class NER model for 4 CoNLL-03 languages that ships with [Flair](https://github.com/flairNLP/flair/). Also kind of works for related languages like French.
F1-Score: **91,51** (CoNLL-03 English), **85,72** (CoNLL-03 German revised), **86,22** (CoNLL-03 Dutch), **85,78** (CoNLL-03 Spanish)
Predicts 4 tags:
| **tag** | **meaning** |
|---------------------------------|-----------|
| PER | person name |
| LOC | location name |
| ORG | organization name |
| MISC | other name |
Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF.
---
### Demo: How to use in Flair
Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`)
```python
from flair.data import Sentence
from flair.models import SequenceTagger
# load tagger
tagger = SequenceTagger.load("flair/ner-multi-fast")
# make example sentence in any of the four languages
sentence = Sentence("George Washington ging nach Washington")
# predict NER tags
tagger.predict(sentence)
# print sentence
print(sentence)
# print predicted NER spans
print('The following NER tags are found:')
# iterate over entities and print
for entity in sentence.get_spans('ner'):
print(entity)
```
This yields the following output:
```
Span [1,2]: "George Washington" [− Labels: PER (0.9977)]
Span [5]: "Washington" [− Labels: LOC (0.9895)]
```
So, the entities "*George Washington*" (labeled as a **person**) and "*Washington*" (labeled as a **location**) are found in the sentence "*George Washington ging nach Washington*".
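Because one model covers English, German, Dutch and Spanish, sentences from different languages can be tagged in a single batch. A minimal sketch (not part of the original card), reusing the `tagger` loaded in the demo above:
```python
from flair.data import Sentence

# one sentence per supported language; predict() accepts a list and batches internally
sentences = [
    Sentence("George Washington went to Washington"),    # English
    Sentence("George Washington ging nach Washington"),  # German
    Sentence("George Washington ging naar Washington"),  # Dutch
    Sentence("George Washington fue a Washington"),      # Spanish
]
tagger.predict(sentences, mini_batch_size=8)

for sentence in sentences:
    print([(span.text, span.labels[0].value) for span in sentence.get_spans('ner')])
```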
---
### Training: Script to train this model
The following Flair script was used to train this model:
```python
from flair.data import Corpus, MultiCorpus
from flair.datasets import CONLL_03, CONLL_03_GERMAN, CONLL_03_DUTCH, CONLL_03_SPANISH
from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings
# 1. get the multi-language corpus
corpus: Corpus = MultiCorpus([
CONLL_03(), # English corpus
CONLL_03_GERMAN(), # German corpus
CONLL_03_DUTCH(), # Dutch corpus
CONLL_03_SPANISH(), # Spanish corpus
])
# 2. what tag do we want to predict?
tag_type = 'ner'
# 3. make the tag dictionary from the corpus
tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)
# 4. initialize each embedding we use
embedding_types = [
# GloVe embeddings
WordEmbeddings('glove'),
# FastText embeddings
WordEmbeddings('de'),
# contextual string embeddings, forward
FlairEmbeddings('multi-forward-fast'),
# contextual string embeddings, backward
FlairEmbeddings('multi-backward-fast'),
]
# embedding stack consists of GloVe, FastText and Flair embeddings
embeddings = StackedEmbeddings(embeddings=embedding_types)
# 5. initialize sequence tagger
from flair.models import SequenceTagger
tagger = SequenceTagger(hidden_size=256,
embeddings=embeddings,
tag_dictionary=tag_dictionary,
tag_type=tag_type)
# 6. initialize trainer
from flair.trainers import ModelTrainer
trainer = ModelTrainer(tagger, corpus)
# 7. run training
trainer.train('resources/taggers/ner-multi-fast',
train_with_dev=True,
max_epochs=150)
```
---
### Cite
Please cite the following papers when using this model.
```
@misc{akbik2019multilingual,
title={Multilingual sequence labeling with one model},
    author={Akbik, Alan and Bergmann, Tanja and Vollgraf, Roland},
booktitle = {{NLDL} 2019, Northern Lights Deep Learning Workshop},
year = {2019}
}
```
```
@inproceedings{akbik2018coling,
title={Contextual String Embeddings for Sequence Labeling},
author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland},
booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics},
pages = {1638--1649},
year = {2018}
}
```
|
flair/ner-multi | 2021-03-02T22:17:41.000Z | [
"pytorch",
"en",
"de",
"nl",
"es",
"multilingual",
"dataset:conll2003",
"flair",
"token-classification",
"sequence-tagger-model"
]
| token-classification | [
".gitattributes",
"README.md",
"loss.tsv",
"pytorch_model.bin",
"training.log"
]
| flair | 0 | flair | ---
tags:
- flair
- token-classification
- sequence-tagger-model
language:
- en
- de
- nl
- es
- multilingual
datasets:
- conll2003
widget:
- text: "George Washington ging nach Washington"
---
## 4-Language NER in Flair (English, German, Dutch and Spanish)
This is the standard 4-class NER model for 4 CoNLL-03 languages that ships with [Flair](https://github.com/flairNLP/flair/). Also kind of works for related languages like French.
F1-Score: **92,16** (CoNLL-03 English), **87,33** (CoNLL-03 German revised), **88,96** (CoNLL-03 Dutch), **86,65** (CoNLL-03 Spanish)
Predicts 4 tags:
| **tag** | **meaning** |
|---------------------------------|-----------|
| PER | person name |
| LOC | location name |
| ORG | organization name |
| MISC | other name |
Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF.
---
### Demo: How to use in Flair
Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`)
```python
from flair.data import Sentence
from flair.models import SequenceTagger
# load tagger
tagger = SequenceTagger.load("flair/ner-multi")
# make example sentence in any of the four languages
sentence = Sentence("George Washington ging nach Washington")
# predict NER tags
tagger.predict(sentence)
# print sentence
print(sentence)
# print predicted NER spans
print('The following NER tags are found:')
# iterate over entities and print
for entity in sentence.get_spans('ner'):
print(entity)
```
This yields the following output:
```
Span [1,2]: "George Washington" [− Labels: PER (0.9977)]
Span [5]: "Washington" [− Labels: LOC (0.9895)]
```
So, the entities "*George Washington*" (labeled as a **person**) and "*Washington*" (labeled as a **location**) are found in the sentence "*George Washington ging nach Washington*".
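Because the tagger was trained on four languages, the same loaded model can also be applied to sentences in the other languages. The snippet below is only an illustration (the Spanish sentence and its expected labels are not taken from an official evaluation):
```python
# reuse the tagger loaded in the demo above on a Spanish example sentence
spanish_sentence = Sentence("George Washington fue a Washington")
tagger.predict(spanish_sentence)
for entity in spanish_sentence.get_spans('ner'):
    print(entity)
```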
---
### Training: Script to train this model
The following Flair script was used to train this model:
```python
from flair.data import Corpus, MultiCorpus
from flair.datasets import CONLL_03, CONLL_03_GERMAN, CONLL_03_DUTCH, CONLL_03_SPANISH
from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings
# 1. get the multi-language corpus
corpus: Corpus = MultiCorpus([
CONLL_03(), # English corpus
CONLL_03_GERMAN(), # German corpus
CONLL_03_DUTCH(), # Dutch corpus
CONLL_03_SPANISH(), # Spanish corpus
])
# 2. what tag do we want to predict?
tag_type = 'ner'
# 3. make the tag dictionary from the corpus
tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)
# 4. initialize each embedding we use
embedding_types = [
# GloVe embeddings
WordEmbeddings('glove'),
# FastText embeddings
WordEmbeddings('de'),
# contextual string embeddings, forward
FlairEmbeddings('multi-forward'),
# contextual string embeddings, backward
FlairEmbeddings('multi-backward'),
]
# embedding stack consists of GloVe, FastText and Flair embeddings
embeddings = StackedEmbeddings(embeddings=embedding_types)
# 5. initialize sequence tagger
from flair.models import SequenceTagger
tagger = SequenceTagger(hidden_size=256,
embeddings=embeddings,
tag_dictionary=tag_dictionary,
tag_type=tag_type)
# 6. initialize trainer
from flair.trainers import ModelTrainer
trainer = ModelTrainer(tagger, corpus)
# 7. run training
trainer.train('resources/taggers/ner-multi',
train_with_dev=True,
max_epochs=150)
```
---
### Cite
Please cite the following papers when using this model.
```
@misc{akbik2019multilingual,
title={Multilingual sequence labeling with one model},
author={Akbik, Alan and Bergmann, Tanja and Vollgraf, Roland},
booktitle = {{NLDL} 2019, Northern Lights Deep Learning Workshop},
year = {2019}
}
```
```
@inproceedings{akbik2018coling,
title={Contextual String Embeddings for Sequence Labeling},
author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland},
booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics},
pages = {1638--1649},
year = {2018}
}
```
|
flair/ner-spanish-large | 2021-05-08T15:36:59.000Z | [
"pytorch",
"es",
"dataset:conll2003",
"arxiv:2011.06993",
"flair",
"token-classification",
"sequence-tagger-model"
]
| token-classification | [
".gitattributes",
"README.md",
"loss.tsv",
"pytorch_model.bin",
"training.log"
]
| flair | 0 | flair | ---
tags:
- flair
- token-classification
- sequence-tagger-model
language: es
datasets:
- conll2003
widget:
- text: "George Washington fue a Washington"
---
## Spanish NER in Flair (large model)
This is the large 4-class NER model for Spanish that ships with [Flair](https://github.com/flairNLP/flair/).
F1-Score: **90,54** (CoNLL-03 Spanish)
Predicts 4 tags:
| **tag** | **meaning** |
|---------------------------------|-----------|
| PER | person name |
| LOC | location name |
| ORG | organization name |
| MISC | other name |
Based on document-level XLM-R embeddings and [FLERT](https://arxiv.org/pdf/2011.06993v1.pdf/).
---
### Demo: How to use in Flair
Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`)
```python
from flair.data import Sentence
from flair.models import SequenceTagger
# load tagger
tagger = SequenceTagger.load("flair/ner-spanish-large")
# make example sentence
sentence = Sentence("George Washington fue a Washington")
# predict NER tags
tagger.predict(sentence)
# print sentence
print(sentence)
# print predicted NER spans
print('The following NER tags are found:')
# iterate over entities and print
for entity in sentence.get_spans('ner'):
print(entity)
```
This yields the following output:
```
Span [1,2]: "George Washington" [− Labels: PER (1.0)]
Span [5]: "Washington" [− Labels: LOC (1.0)]
```
So, the entities "*George Washington*" (labeled as a **person**) and "*Washington*" (labeled as a **location**) are found in the sentence "*George Washington fue a Washington*".
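If you want to tag many sentences at once, prediction can also be batched. The snippet below is only a sketch: the second sentence and the `mini_batch_size` value are arbitrary examples.
```python
# predict several sentences in one call
sentences = [Sentence("George Washington fue a Washington"),
             Sentence("Miguel de Cervantes escribió el Quijote en Madrid")]
tagger.predict(sentences, mini_batch_size=32)
for s in sentences:
    print(s.get_spans('ner'))
```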
---
### Training: Script to train this model
The following Flair script was used to train this model:
```python
import torch
# 1. get the corpus
from flair.datasets import CONLL_03_SPANISH
corpus = CONLL_03_SPANISH()
# 2. what tag do we want to predict?
tag_type = 'ner'
# 3. make the tag dictionary from the corpus
tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)
# 4. initialize fine-tuneable transformer embeddings WITH document context
from flair.embeddings import TransformerWordEmbeddings
embeddings = TransformerWordEmbeddings(
model='xlm-roberta-large',
layers="-1",
subtoken_pooling="first",
fine_tune=True,
use_context=True,
)
# 5. initialize bare-bones sequence tagger (no CRF, no RNN, no reprojection)
from flair.models import SequenceTagger
tagger = SequenceTagger(
hidden_size=256,
embeddings=embeddings,
tag_dictionary=tag_dictionary,
tag_type='ner',
use_crf=False,
use_rnn=False,
reproject_embeddings=False,
)
# 6. initialize trainer with AdamW optimizer
from flair.trainers import ModelTrainer
trainer = ModelTrainer(tagger, corpus, optimizer=torch.optim.AdamW)
# 7. run training with XLM parameters (20 epochs, small LR)
from torch.optim.lr_scheduler import OneCycleLR
trainer.train('resources/taggers/ner-spanish-large',
learning_rate=5.0e-6,
mini_batch_size=4,
mini_batch_chunk_size=1,
max_epochs=20,
scheduler=OneCycleLR,
embeddings_storage_mode='none',
weight_decay=0.,
)
```
---
### Cite
Please cite the following paper when using this model.
```
@misc{schweter2020flert,
title={FLERT: Document-Level Features for Named Entity Recognition},
author={Stefan Schweter and Alan Akbik},
year={2020},
eprint={2011.06993},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
```
---
### Issues?
The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
|
flair/pos-english-fast | 2021-03-02T22:19:11.000Z | [
"pytorch",
"en",
"dataset:ontonotes",
"flair",
"token-classification",
"sequence-tagger-model"
]
| token-classification | [
".gitattributes",
"README.md",
"loss.tsv",
"pytorch_model.bin",
"training.log"
]
| flair | 0 | flair | ---
tags:
- flair
- token-classification
- sequence-tagger-model
language: en
datasets:
- ontonotes
widget:
- text: "I love Berlin."
---
## English Part-of-Speech Tagging in Flair (fast model)
This is the fast part-of-speech tagging model for English that ships with [Flair](https://github.com/flairNLP/flair/).
F1-Score: **98,10** (Ontonotes)
Predicts fine-grained POS tags:
| **tag** | **meaning** |
|---------------------------------|-----------|
|ADD | Email |
|AFX | Affix |
|CC | Coordinating conjunction |
|CD | Cardinal number |
|DT | Determiner |
|EX | Existential there |
|FW | Foreign word |
|HYPH | Hyphen |
|IN | Preposition or subordinating conjunction |
|JJ | Adjective |
|JJR |Adjective, comparative |
|JJS | Adjective, superlative |
|LS | List item marker |
|MD | Modal |
|NFP | Superfluous punctuation |
|NN | Noun, singular or mass |
|NNP |Proper noun, singular |
|NNPS | Proper noun, plural |
|NNS |Noun, plural |
|PDT | Predeterminer |
|POS | Possessive ending |
|PRP | Personal pronoun |
|PRP$ | Possessive pronoun |
|RB | Adverb |
|RBR | Adverb, comparative |
|RBS | Adverb, superlative |
|RP | Particle |
|SYM | Symbol |
|TO | to |
|UH | Interjection |
|VB | Verb, base form |
|VBD | Verb, past tense |
|VBG | Verb, gerund or present participle |
|VBN | Verb, past participle |
|VBP | Verb, non-3rd person singular present |
|VBZ | Verb, 3rd person singular present |
|WDT | Wh-determiner |
|WP | Wh-pronoun |
|WP$ | Possessive wh-pronoun |
|WRB | Wh-adverb |
|XX | Unknown |
Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF.
---
### Demo: How to use in Flair
Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`)
```python
from flair.data import Sentence
from flair.models import SequenceTagger
# load tagger
tagger = SequenceTagger.load("flair/pos-english-fast")
# make example sentence
sentence = Sentence("I love Berlin.")
# predict POS tags
tagger.predict(sentence)
# print sentence
print(sentence)
# print predicted POS tags
print('The following POS tags are found:')
# iterate over entities and print
for entity in sentence.get_spans('pos'):
print(entity)
```
This yields the following output:
```
Span [1]: "I" [− Labels: PRP (1.0)]
Span [2]: "love" [− Labels: VBP (0.9998)]
Span [3]: "Berlin" [− Labels: NNP (0.9999)]
Span [4]: "." [− Labels: . (0.9998)]
```
So, the word "*I*" is labeled as a **pronoun** (PRP), "*love*" is labeled as a **verb** (VBP) and "*Berlin*" is labeled as a **proper noun** (NNP) in the sentence "*I love Berlin*".
---
### Training: Script to train this model
The following Flair script was used to train this model:
```python
from flair.data import Corpus
from flair.datasets import ColumnCorpus
from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings
# 1. load the corpus (Ontonotes does not ship with Flair, you need to download and reformat into a column format yourself)
corpus: Corpus = ColumnCorpus(
"resources/tasks/onto-ner",
column_format={0: "text", 1: "pos", 2: "upos", 3: "ner"},
tag_to_bioes="ner",
)
# 2. what tag do we want to predict?
tag_type = 'pos'
# 3. make the tag dictionary from the corpus
tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)
# 4. initialize each embedding we use
embedding_types = [
# contextual string embeddings, forward
FlairEmbeddings('news-forward'),
# contextual string embeddings, backward
FlairEmbeddings('news-backward'),
]
# embedding stack consists only of Flair embeddings
embeddings = StackedEmbeddings(embeddings=embedding_types)
# 5. initialize sequence tagger
from flair.models import SequenceTagger
tagger = SequenceTagger(hidden_size=256,
embeddings=embeddings,
tag_dictionary=tag_dictionary,
tag_type=tag_type)
# 6. initialize trainer
from flair.trainers import ModelTrainer
trainer = ModelTrainer(tagger, corpus)
# 7. run training
trainer.train('resources/taggers/pos-english-fast',
train_with_dev=True,
max_epochs=150)
```
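For reference, the files under `resources/tasks/onto-ner` are expected to follow the column format declared above: one token per line, columns separated by whitespace, and an empty line between sentences. The excerpt below is only a hypothetical illustration of that layout (it is not actual OntoNotes data, and the exact label inventory depends on your conversion):
```
I       PRP   PRON   O
love    VBP   VERB   O
Berlin  NNP   PROPN  B-LOC
.       .     PUNCT  O
```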
---
### Cite
Please cite the following paper when using this model.
```
@inproceedings{akbik2018coling,
title={Contextual String Embeddings for Sequence Labeling},
author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland},
booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics},
pages = {1638--1649},
year = {2018}
}
```
---
### Issues?
The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
|
flair/pos-english | 2021-03-02T22:20:07.000Z | [
"pytorch",
"en",
"dataset:ontonotes",
"flair",
"token-classification",
"sequence-tagger-model"
]
| token-classification | [
".gitattributes",
"README.md",
"loss.tsv",
"pytorch_model.bin",
"training.log"
]
| flair | 0 | flair | ---
tags:
- flair
- token-classification
- sequence-tagger-model
language: en
datasets:
- ontonotes
widget:
- text: "I love Berlin."
---
## English Part-of-Speech Tagging in Flair (default model)
This is the standard part-of-speech tagging model for English that ships with [Flair](https://github.com/flairNLP/flair/).
F1-Score: **98,19** (Ontonotes)
Predicts fine-grained POS tags:
| **tag** | **meaning** |
|---------------------------------|-----------|
|ADD | Email |
|AFX | Affix |
|CC | Coordinating conjunction |
|CD | Cardinal number |
|DT | Determiner |
|EX | Existential there |
|FW | Foreign word |
|HYPH | Hyphen |
|IN | Preposition or subordinating conjunction |
|JJ | Adjective |
|JJR |Adjective, comparative |
|JJS | Adjective, superlative |
|LS | List item marker |
|MD | Modal |
|NFP | Superfluous punctuation |
|NN | Noun, singular or mass |
|NNP |Proper noun, singular |
|NNPS | Proper noun, plural |
|NNS |Noun, plural |
|PDT | Predeterminer |
|POS | Possessive ending |
|PRP | Personal pronoun |
|PRP$ | Possessive pronoun |
|RB | Adverb |
|RBR | Adverb, comparative |
|RBS | Adverb, superlative |
|RP | Particle |
|SYM | Symbol |
|TO | to |
|UH | Interjection |
|VB | Verb, base form |
|VBD | Verb, past tense |
|VBG | Verb, gerund or present participle |
|VBN | Verb, past participle |
|VBP | Verb, non-3rd person singular present |
|VBZ | Verb, 3rd person singular present |
|WDT | Wh-determiner |
|WP | Wh-pronoun |
|WP$ | Possessive wh-pronoun |
|WRB | Wh-adverb |
|XX | Unknown |
Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF.
---
### Demo: How to use in Flair
Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`)
```python
from flair.data import Sentence
from flair.models import SequenceTagger
# load tagger
tagger = SequenceTagger.load("flair/pos-english")
# make example sentence
sentence = Sentence("I love Berlin.")
# predict POS tags
tagger.predict(sentence)
# print sentence
print(sentence)
# print predicted POS tags
print('The following POS tags are found:')
# iterate over entities and print
for entity in sentence.get_spans('pos'):
print(entity)
```
This yields the following output:
```
Span [1]: "I" [− Labels: PRP (1.0)]
Span [2]: "love" [− Labels: VBP (1.0)]
Span [3]: "Berlin" [− Labels: NNP (0.9999)]
Span [4]: "." [− Labels: . (1.0)]
```
So, the word "*I*" is labeled as a **pronoun** (PRP), "*love*" is labeled as a **verb** (VBP) and "*Berlin*" is labeled as a **proper noun** (NNP) in the sentence "*I love Berlin*".
---
### Training: Script to train this model
The following Flair script was used to train this model:
```python
from flair.data import Corpus
from flair.datasets import ColumnCorpus
from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings
# 1. load the corpus (Ontonotes does not ship with Flair, you need to download and reformat into a column format yourself)
corpus: Corpus = ColumnCorpus(
"resources/tasks/onto-ner",
column_format={0: "text", 1: "pos", 2: "upos", 3: "ner"},
tag_to_bioes="ner",
)
# 2. what tag do we want to predict?
tag_type = 'pos'
# 3. make the tag dictionary from the corpus
tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)
# 4. initialize each embedding we use
embedding_types = [
# contextual string embeddings, forward
FlairEmbeddings('news-forward'),
# contextual string embeddings, backward
FlairEmbeddings('news-backward'),
]
# embedding stack consists only of Flair embeddings
embeddings = StackedEmbeddings(embeddings=embedding_types)
# 5. initialize sequence tagger
from flair.models import SequenceTagger
tagger = SequenceTagger(hidden_size=256,
embeddings=embeddings,
tag_dictionary=tag_dictionary,
tag_type=tag_type)
# 6. initialize trainer
from flair.trainers import ModelTrainer
trainer = ModelTrainer(tagger, corpus)
# 7. run training
trainer.train('resources/taggers/pos-english',
train_with_dev=True,
max_epochs=150)
```
---
### Cite
Please cite the following paper when using this model.
```
@inproceedings{akbik2018coling,
title={Contextual String Embeddings for Sequence Labeling},
author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland},
booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics},
pages = {1638--1649},
year = {2018}
}
```
---
### Issues?
The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
|
flair/upos-english-fast | 2021-03-02T22:21:02.000Z | [
"pytorch",
"en",
"dataset:ontonotes",
"flair",
"token-classification",
"sequence-tagger-model"
]
| token-classification | [
".gitattributes",
"README.md",
"loss.tsv",
"pytorch_model.bin",
"training.log"
]
| flair | 0 | flair | ---
tags:
- flair
- token-classification
- sequence-tagger-model
language: en
datasets:
- ontonotes
widget:
- text: "I love Berlin."
---
## English Universal Part-of-Speech Tagging in Flair (fast model)
This is the fast universal part-of-speech tagging model for English that ships with [Flair](https://github.com/flairNLP/flair/).
F1-Score: **98,47** (Ontonotes)
Predicts universal POS tags:
| **tag** | **meaning** |
|---------------------------------|-----------|
|ADJ | adjective |
| ADP | adposition |
| ADV | adverb |
| AUX | auxiliary |
| CCONJ | coordinating conjunction |
| DET | determiner |
| INTJ | interjection |
| NOUN | noun |
| NUM | numeral |
| PART | particle |
| PRON | pronoun |
| PROPN | proper noun |
| PUNCT | punctuation |
| SCONJ | subordinating conjunction |
| SYM | symbol |
| VERB | verb |
| X | other |
Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF.
---
### Demo: How to use in Flair
Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`)
```python
from flair.data import Sentence
from flair.models import SequenceTagger
# load tagger
tagger = SequenceTagger.load("flair/upos-english-fast")
# make example sentence
sentence = Sentence("I love Berlin.")
# predict POS tags
tagger.predict(sentence)
# print sentence
print(sentence)
# print predicted POS tags
print('The following POS tags are found:')
# iterate over entities and print
for entity in sentence.get_spans('pos'):
print(entity)
```
This yields the following output:
```
Span [1]: "I" [− Labels: PRON (0.9996)]
Span [2]: "love" [− Labels: VERB (1.0)]
Span [3]: "Berlin" [− Labels: PROPN (0.9986)]
Span [4]: "." [− Labels: PUNCT (1.0)]
```
So, the word "*I*" is labeled as a **pronoun** (PRON), "*love*" is labeled as a **verb** (VERB) and "*Berlin*" is labeled as a **proper noun** (PROPN) in the sentence "*I love Berlin*".
---
### Training: Script to train this model
The following Flair script was used to train this model:
```python
from flair.data import Corpus
from flair.datasets import ColumnCorpus
from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings
# 1. load the corpus (Ontonotes does not ship with Flair, you need to download and reformat into a column format yourself)
corpus: Corpus = ColumnCorpus(
"resources/tasks/onto-ner",
column_format={0: "text", 1: "pos", 2: "upos", 3: "ner"},
tag_to_bioes="ner",
)
# 2. what tag do we want to predict?
tag_type = 'upos'
# 3. make the tag dictionary from the corpus
tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)
# 4. initialize each embedding we use
embedding_types = [
# contextual string embeddings, forward
FlairEmbeddings('news-forward-fast'),
# contextual string embeddings, backward
FlairEmbeddings('news-backward-fast'),
]
# embedding stack consists only of Flair embeddings
embeddings = StackedEmbeddings(embeddings=embedding_types)
# 5. initialize sequence tagger
from flair.models import SequenceTagger
tagger = SequenceTagger(hidden_size=256,
embeddings=embeddings,
tag_dictionary=tag_dictionary,
tag_type=tag_type)
# 6. initialize trainer
from flair.trainers import ModelTrainer
trainer = ModelTrainer(tagger, corpus)
# 7. run training
trainer.train('resources/taggers/upos-english-fast',
train_with_dev=True,
max_epochs=150)
```
---
### Cite
Please cite the following paper when using this model.
```
@inproceedings{akbik2018coling,
title={Contextual String Embeddings for Sequence Labeling},
author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland},
booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics},
pages = {1638--1649},
year = {2018}
}
```
---
### Issues?
The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
|
flair/upos-english | 2021-03-02T22:21:49.000Z | [
"pytorch",
"en",
"dataset:ontonotes",
"flair",
"token-classification",
"sequence-tagger-model"
]
| token-classification | [
".gitattributes",
"README.md",
"loss.tsv",
"pytorch_model.bin",
"training.log"
]
| flair | 0 | flair | ---
tags:
- flair
- token-classification
- sequence-tagger-model
language: en
datasets:
- ontonotes
widget:
- text: "I love Berlin."
---
## English Universal Part-of-Speech Tagging in Flair (default model)
This is the standard universal part-of-speech tagging model for English that ships with [Flair](https://github.com/flairNLP/flair/).
F1-Score: **98,6** (Ontonotes)
Predicts universal POS tags:
| **tag** | **meaning** |
|---------------------------------|-----------|
|ADJ | adjective |
| ADP | adposition |
| ADV | adverb |
| AUX | auxiliary |
| CCONJ | coordinating conjunction |
| DET | determiner |
| INTJ | interjection |
| NOUN | noun |
| NUM | numeral |
| PART | particle |
| PRON | pronoun |
| PROPN | proper noun |
| PUNCT | punctuation |
| SCONJ | subordinating conjunction |
| SYM | symbol |
| VERB | verb |
| X | other |
Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF.
---
### Demo: How to use in Flair
Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`)
```python
from flair.data import Sentence
from flair.models import SequenceTagger
# load tagger
tagger = SequenceTagger.load("flair/upos-english")
# make example sentence
sentence = Sentence("I love Berlin.")
# predict POS tags
tagger.predict(sentence)
# print sentence
print(sentence)
# print predicted POS tags
print('The following POS tags are found:')
# iterate over entities and print
for entity in sentence.get_spans('pos'):
print(entity)
```
This yields the following output:
```
Span [1]: "I" [− Labels: PRON (0.9996)]
Span [2]: "love" [− Labels: VERB (1.0)]
Span [3]: "Berlin" [− Labels: PROPN (0.9986)]
Span [4]: "." [− Labels: PUNCT (1.0)]
```
So, the word "*I*" is labeled as a **pronoun** (PRON), "*love*" is labeled as a **verb** (VERB) and "*Berlin*" is labeled as a **proper noun** (PROPN) in the sentence "*I love Berlin*".
---
### Training: Script to train this model
The following Flair script was used to train this model:
```python
from flair.data import Corpus
from flair.datasets import ColumnCorpus
from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings
# 1. load the corpus (Ontonotes does not ship with Flair, you need to download and reformat into a column format yourself)
corpus: Corpus = ColumnCorpus(
"resources/tasks/onto-ner",
column_format={0: "text", 1: "pos", 2: "upos", 3: "ner"},
tag_to_bioes="ner",
)
# 2. what tag do we want to predict?
tag_type = 'upos'
# 3. make the tag dictionary from the corpus
tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)
# 4. initialize each embedding we use
embedding_types = [
# contextual string embeddings, forward
FlairEmbeddings('news-forward'),
# contextual string embeddings, backward
FlairEmbeddings('news-backward'),
]
# embedding stack consists only of Flair embeddings
embeddings = StackedEmbeddings(embeddings=embedding_types)
# 5. initialize sequence tagger
from flair.models import SequenceTagger
tagger = SequenceTagger(hidden_size=256,
embeddings=embeddings,
tag_dictionary=tag_dictionary,
tag_type=tag_type)
# 6. initialize trainer
from flair.trainers import ModelTrainer
trainer = ModelTrainer(tagger, corpus)
# 7. run training
trainer.train('resources/taggers/upos-english',
train_with_dev=True,
max_epochs=150)
```
---
### Cite
Please cite the following paper when using this model.
```
@inproceedings{akbik2018coling,
title={Contextual String Embeddings for Sequence Labeling},
author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland},
booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics},
pages = {1638--1649},
year = {2018}
}
```
---
### Issues?
The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
|
flair/upos-multi-fast | 2021-03-02T22:22:55.000Z | [
"pytorch",
"en",
"de",
"fr",
"it",
"nl",
"pl",
"es",
"sv",
"da",
"no",
"fi",
"cs",
"dataset:ontonotes",
"flair",
"token-classification",
"sequence-tagger-model"
]
| token-classification | [
".gitattributes",
"README.md",
"loss.tsv",
"pytorch_model.bin",
"training.log"
]
| flair | 0 | flair | ---
tags:
- flair
- token-classification
- sequence-tagger-model
language:
- en
- de
- fr
- it
- nl
- pl
- es
- sv
- da
- no
- fi
- cs
datasets:
- ontonotes
widget:
- text: "Ich liebe Berlin, as they say."
---
## Multilingual Universal Part-of-Speech Tagging in Flair (fast model)
This is the fast multilingual universal part-of-speech tagging model that ships with [Flair](https://github.com/flairNLP/flair/).
F1-Score: **92,88** (12 UD Treebanks covering English, German, French, Italian, Dutch, Polish, Spanish, Swedish, Danish, Norwegian, Finnish and Czech)
Predicts universal POS tags:
| **tag** | **meaning** |
|---------------------------------|-----------|
|ADJ | adjective |
| ADP | adposition |
| ADV | adverb |
| AUX | auxiliary |
| CCONJ | coordinating conjunction |
| DET | determiner |
| INTJ | interjection |
| NOUN | noun |
| NUM | numeral |
| PART | particle |
| PRON | pronoun |
| PROPN | proper noun |
| PUNCT | punctuation |
| SCONJ | subordinating conjunction |
| SYM | symbol |
| VERB | verb |
| X | other |
Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF.
---
### Demo: How to use in Flair
Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`)
```python
from flair.data import Sentence
from flair.models import SequenceTagger
# load tagger
tagger = SequenceTagger.load("flair/upos-multi-fast")
# make example sentence
sentence = Sentence("Ich liebe Berlin, as they say. ")
# predict POS tags
tagger.predict(sentence)
# print sentence
print(sentence)
# print predicted POS tags
print('The following POS tags are found:')
# iterate over entities and print
for entity in sentence.get_spans('pos'):
print(entity)
```
This yields the following output:
```
Span [1]: "Ich" [− Labels: PRON (0.9999)]
Span [2]: "liebe" [− Labels: VERB (0.9999)]
Span [3]: "Berlin" [− Labels: PROPN (0.9997)]
Span [4]: "," [− Labels: PUNCT (1.0)]
Span [5]: "as" [− Labels: SCONJ (0.9991)]
Span [6]: "they" [− Labels: PRON (0.9998)]
Span [7]: "say" [− Labels: VERB (0.9998)]
Span [8]: "." [− Labels: PUNCT (1.0)]
```
So, the words "*Ich*" and "*they*" are labeled as **pronouns** (PRON), while "*liebe*" and "*say*" are labeled as **verbs** (VERB) in the multilingual sentence "*Ich liebe Berlin, as they say*".
---
### Training: Script to train this model
The following Flair script was used to train this model:
```python
from flair.data import MultiCorpus
from flair.datasets import UD_ENGLISH, UD_GERMAN, UD_FRENCH, UD_ITALIAN, UD_POLISH, UD_DUTCH, UD_CZECH, \
UD_DANISH, UD_SPANISH, UD_SWEDISH, UD_NORWEGIAN, UD_FINNISH
from flair.embeddings import StackedEmbeddings, FlairEmbeddings
# 1. make a multi corpus consisting of 12 UD treebanks (in_memory=False here because this corpus becomes large)
corpus = MultiCorpus([
UD_ENGLISH(in_memory=False),
UD_GERMAN(in_memory=False),
UD_DUTCH(in_memory=False),
UD_FRENCH(in_memory=False),
UD_ITALIAN(in_memory=False),
UD_SPANISH(in_memory=False),
UD_POLISH(in_memory=False),
UD_CZECH(in_memory=False),
UD_DANISH(in_memory=False),
UD_SWEDISH(in_memory=False),
UD_NORWEGIAN(in_memory=False),
UD_FINNISH(in_memory=False),
])
# 2. what tag do we want to predict?
tag_type = 'upos'
# 3. make the tag dictionary from the corpus
tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)
# 4. initialize each embedding we use
embedding_types = [
# contextual string embeddings, forward
FlairEmbeddings('multi-forward-fast'),
# contextual string embeddings, backward
FlairEmbeddings('multi-backward-fast'),
]
# embedding stack consists only of Flair embeddings
embeddings = StackedEmbeddings(embeddings=embedding_types)
# 5. initialize sequence tagger
from flair.models import SequenceTagger
tagger = SequenceTagger(hidden_size=256,
embeddings=embeddings,
tag_dictionary=tag_dictionary,
tag_type=tag_type,
use_crf=False)
# 6. initialize trainer
from flair.trainers import ModelTrainer
trainer = ModelTrainer(tagger, corpus)
# 7. run training
trainer.train('resources/taggers/upos-multi-fast',
train_with_dev=True,
max_epochs=150)
```
---
### Cite
Please cite the following paper when using this model.
```
@inproceedings{akbik2018coling,
title={Contextual String Embeddings for Sequence Labeling},
author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland},
booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics},
pages = {1638--1649},
year = {2018}
}
```
---
### Issues?
The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
|
flair/upos-multi | 2021-03-02T22:16:39.000Z | [
"pytorch",
"en",
"de",
"fr",
"it",
"nl",
"pl",
"es",
"sv",
"da",
"no",
"fi",
"cs",
"dataset:ontonotes",
"flair",
"token-classification",
"sequence-tagger-model"
]
| token-classification | [
".gitattributes",
"README.md",
"loss.tsv",
"pytorch_model.bin",
"training.log"
]
| flair | 0 | flair | ---
tags:
- flair
- token-classification
- sequence-tagger-model
language:
- en
- de
- fr
- it
- nl
- pl
- es
- sv
- da
- no
- fi
- cs
datasets:
- ontonotes
widget:
- text: "Ich liebe Berlin, as they say"
---
## Multilingual Universal Part-of-Speech Tagging in Flair (default model)
This is the default multilingual universal part-of-speech tagging model that ships with [Flair](https://github.com/flairNLP/flair/).
F1-Score: **98,47** (12 UD Treebanks covering English, German, French, Italian, Dutch, Polish, Spanish, Swedish, Danish, Norwegian, Finnish and Czech)
Predicts universal POS tags:
| **tag** | **meaning** |
|---------------------------------|-----------|
|ADJ | adjective |
| ADP | adposition |
| ADV | adverb |
| AUX | auxiliary |
| CCONJ | coordinating conjunction |
| DET | determiner |
| INTJ | interjection |
| NOUN | noun |
| NUM | numeral |
| PART | particle |
| PRON | pronoun |
| PROPN | proper noun |
| PUNCT | punctuation |
| SCONJ | subordinating conjunction |
| SYM | symbol |
| VERB | verb |
| X | other |
Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF.
---
### Demo: How to use in Flair
Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`)
```python
from flair.data import Sentence
from flair.models import SequenceTagger
# load tagger
tagger = SequenceTagger.load("flair/upos-multi")
# make example sentence
sentence = Sentence("Ich liebe Berlin, as they say. ")
# predict POS tags
tagger.predict(sentence)
# print sentence
print(sentence)
# print predicted POS tags
print('The following POS tags are found:')
# iterate over entities and print
for entity in sentence.get_spans('pos'):
print(entity)
```
This yields the following output:
```
Span [1]: "Ich" [− Labels: PRON (0.9999)]
Span [2]: "liebe" [− Labels: VERB (0.9999)]
Span [3]: "Berlin" [− Labels: PROPN (0.9997)]
Span [4]: "," [− Labels: PUNCT (1.0)]
Span [5]: "as" [− Labels: SCONJ (0.9991)]
Span [6]: "they" [− Labels: PRON (0.9998)]
Span [7]: "say" [− Labels: VERB (0.9998)]
Span [8]: "." [− Labels: PUNCT (1.0)]
```
So, the words "*Ich*" and "*they*" are labeled as **pronouns** (PRON), while "*liebe*" and "*say*" are labeled as **verbs** (VERB) in the multilingual sentence "*Ich liebe Berlin, as they say*".
---
### Training: Script to train this model
The following Flair script was used to train this model:
```python
from flair.data import MultiCorpus
from flair.datasets import UD_ENGLISH, UD_GERMAN, UD_FRENCH, UD_ITALIAN, UD_POLISH, UD_DUTCH, UD_CZECH, \
UD_DANISH, UD_SPANISH, UD_SWEDISH, UD_NORWEGIAN, UD_FINNISH
from flair.embeddings import StackedEmbeddings, FlairEmbeddings
# 1. make a multi corpus consisting of 12 UD treebanks (in_memory=False here because this corpus becomes large)
corpus = MultiCorpus([
UD_ENGLISH(in_memory=False),
UD_GERMAN(in_memory=False),
UD_DUTCH(in_memory=False),
UD_FRENCH(in_memory=False),
UD_ITALIAN(in_memory=False),
UD_SPANISH(in_memory=False),
UD_POLISH(in_memory=False),
UD_CZECH(in_memory=False),
UD_DANISH(in_memory=False),
UD_SWEDISH(in_memory=False),
UD_NORWEGIAN(in_memory=False),
UD_FINNISH(in_memory=False),
])
# 2. what tag do we want to predict?
tag_type = 'upos'
# 3. make the tag dictionary from the corpus
tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)
# 4. initialize each embedding we use
embedding_types = [
# contextual string embeddings, forward
FlairEmbeddings('multi-forward'),
# contextual string embeddings, backward
FlairEmbeddings('multi-backward'),
]
# embedding stack consists only of Flair embeddings
embeddings = StackedEmbeddings(embeddings=embedding_types)
# 5. initialize sequence tagger
from flair.models import SequenceTagger
tagger = SequenceTagger(hidden_size=256,
embeddings=embeddings,
tag_dictionary=tag_dictionary,
tag_type=tag_type,
use_crf=False)
# 6. initialize trainer
from flair.trainers import ModelTrainer
trainer = ModelTrainer(tagger, corpus)
# 7. run training
trainer.train('resources/taggers/upos-multi',
train_with_dev=True,
max_epochs=150)
```
---
### Cite
Please cite the following paper when using this model.
```
@inproceedings{akbik2018coling,
title={Contextual String Embeddings for Sequence Labeling},
author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland},
booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics},
pages = {1638--1649},
year = {2018}
}
```
---
### Issues?
The Flair issue tracker is available [here](https://github.com/flairNLP/flair/issues/).
|
flaubert/flaubert_base_cased | 2021-05-19T16:54:23.000Z | [
"pytorch",
"flaubert",
"lm-head",
"masked-lm",
"fr",
"dataset:flaubert",
"transformers",
"license:mit",
"bert",
"language-model",
"flue",
"french",
"bert-base",
"flaubert-base",
"cased",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"README.md",
"config.json",
"merges.txt",
"pytorch_model.bin",
"tokenizer_config.json",
"vocab.json"
]
| flaubert | 7,990 | transformers | ---
language: fr
license: mit
datasets:
- flaubert
metrics:
- flue
tags:
- bert
- language-model
- flaubert
- flue
- french
- bert-base
- flaubert-base
- cased
---
# FlauBERT: Unsupervised Language Model Pre-training for French
**FlauBERT** is a French BERT trained on a very large and heterogeneous French corpus. Models of different sizes are trained using the new CNRS (French National Centre for Scientific Research) [Jean Zay](http://www.idris.fr/eng/jean-zay/ ) supercomputer.
Along with FlauBERT comes [**FLUE**](https://github.com/getalp/Flaubert/tree/master/flue): an evaluation setup for French NLP systems similar to the popular GLUE benchmark. The goal is to enable further reproducible experiments in the future and to share models and progress on the French language. For more details, please refer to the [official website](https://github.com/getalp/Flaubert).
## FlauBERT models
| Model name | Number of layers | Attention Heads | Embedding Dimension | Total Parameters |
| :------: | :---: | :---: | :---: | :---: |
| `flaubert-small-cased` | 6 | 8 | 512 | 54 M |
| `flaubert-base-uncased` | 12 | 12 | 768 | 137 M |
| `flaubert-base-cased` | 12 | 12 | 768 | 138 M |
| `flaubert-large-cased` | 24 | 16 | 1024 | 373 M |
**Note:** `flaubert-small-cased` is partially trained, so performance is not guaranteed. Consider using it for debugging purposes only.
## Using FlauBERT with Hugging Face's Transformers
```python
import torch
from transformers import FlaubertModel, FlaubertTokenizer
# Choose among ['flaubert/flaubert_small_cased', 'flaubert/flaubert_base_uncased',
# 'flaubert/flaubert_base_cased', 'flaubert/flaubert_large_cased']
modelname = 'flaubert/flaubert_base_cased'
# Load pretrained model and tokenizer
flaubert, log = FlaubertModel.from_pretrained(modelname, output_loading_info=True)
flaubert_tokenizer = FlaubertTokenizer.from_pretrained(modelname, do_lowercase=False)
# do_lowercase=False if using cased models, True if using uncased ones
sentence = "Le chat mange une pomme."
token_ids = torch.tensor([flaubert_tokenizer.encode(sentence)])
last_layer = flaubert(token_ids)[0]
print(last_layer.shape)
# torch.Size([1, 8, 768]) -> (batch size x number of tokens x embedding dimension)
# The BERT [CLS] token corresponds to the first hidden state of the last layer
cls_embedding = last_layer[:, 0, :]
```
**Notes:** if your `transformers` version is <=2.10.0, `modelname` should take one
of the following values:
```
['flaubert-small-cased', 'flaubert-base-uncased', 'flaubert-base-cased', 'flaubert-large-cased']
```
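The checkpoints also ship with a language-modeling head, so masked-token prediction can be sketched with the `fill-mask` pipeline. The snippet below is not part of the original instructions: the example sentence is arbitrary and the mask token is read from the tokenizer rather than hard-coded (assuming your `transformers` version registers FlauBERT for this pipeline):
```python
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="flaubert/flaubert_base_cased")
# build a masked sentence using the tokenizer's own mask token
masked_sentence = f"Le chat mange une {fill_mask.tokenizer.mask_token}."
for prediction in fill_mask(masked_sentence):
    print(prediction["token_str"], round(prediction["score"], 4))
```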
## References
If you use FlauBERT or the FLUE Benchmark for your scientific publication, or if you find the resources in this repository useful, please cite one of the following papers:
[LREC paper](http://www.lrec-conf.org/proceedings/lrec2020/pdf/2020.lrec-1.302.pdf)
```
@InProceedings{le2020flaubert,
author = {Le, Hang and Vial, Lo\"{i}c and Frej, Jibril and Segonne, Vincent and Coavoux, Maximin and Lecouteux, Benjamin and Allauzen, Alexandre and Crabb\'{e}, Beno\^{i}t and Besacier, Laurent and Schwab, Didier},
title = {FlauBERT: Unsupervised Language Model Pre-training for French},
booktitle = {Proceedings of The 12th Language Resources and Evaluation Conference},
month = {May},
year = {2020},
address = {Marseille, France},
publisher = {European Language Resources Association},
pages = {2479--2490},
url = {https://www.aclweb.org/anthology/2020.lrec-1.302}
}
```
[TALN paper](https://hal.archives-ouvertes.fr/hal-02784776/)
```
@inproceedings{le2020flaubert,
title = {FlauBERT: des mod{\`e}les de langue contextualis{\'e}s pr{\'e}-entra{\^\i}n{\'e}s pour le fran{\c{c}}ais},
author = {Le, Hang and Vial, Lo{\"\i}c and Frej, Jibril and Segonne, Vincent and Coavoux, Maximin and Lecouteux, Benjamin and Allauzen, Alexandre and Crabb{\'e}, Beno{\^\i}t and Besacier, Laurent and Schwab, Didier},
booktitle = {Actes de la 6e conf{\'e}rence conjointe Journ{\'e}es d'{\'E}tudes sur la Parole (JEP, 31e {\'e}dition), Traitement Automatique des Langues Naturelles (TALN, 27e {\'e}dition), Rencontre des {\'E}tudiants Chercheurs en Informatique pour le Traitement Automatique des Langues (R{\'E}CITAL, 22e {\'e}dition). Volume 2: Traitement Automatique des Langues Naturelles},
pages = {268--278},
year = {2020},
organization = {ATALA}
}
``` |
flaubert/flaubert_base_uncased | 2021-05-19T16:54:45.000Z | [
"pytorch",
"flaubert",
"lm-head",
"masked-lm",
"fr",
"dataset:flaubert",
"transformers",
"license:mit",
"bert",
"language-model",
"flue",
"french",
"flaubert-base",
"uncased",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"README.md",
"config.json",
"merges.txt",
"pytorch_model.bin",
"tokenizer_config.json",
"vocab.json"
]
| flaubert | 1,724 | transformers | ---
language: fr
license: mit
datasets:
- flaubert
metrics:
- flue
tags:
- bert
- language-model
- flaubert
- flue
- french
- flaubert-base
- uncased
---
# FlauBERT: Unsupervised Language Model Pre-training for French
**FlauBERT** is a French BERT trained on a very large and heterogeneous French corpus. Models of different sizes are trained using the new CNRS (French National Centre for Scientific Research) [Jean Zay](http://www.idris.fr/eng/jean-zay/ ) supercomputer.
Along with FlauBERT comes [**FLUE**](https://github.com/getalp/Flaubert/tree/master/flue): an evaluation setup for French NLP systems similar to the popular GLUE benchmark. The goal is to enable further reproducible experiments in the future and to share models and progress on the French language. For more details, please refer to the [official website](https://github.com/getalp/Flaubert).
## FlauBERT models
| Model name | Number of layers | Attention Heads | Embedding Dimension | Total Parameters |
| :------: | :---: | :---: | :---: | :---: |
| `flaubert-small-cased` | 6 | 8 | 512 | 54 M |
| `flaubert-base-uncased` | 12 | 12 | 768 | 137 M |
| `flaubert-base-cased` | 12 | 12 | 768 | 138 M |
| `flaubert-large-cased` | 24 | 16 | 1024 | 373 M |
**Note:** `flaubert-small-cased` is partially trained, so performance is not guaranteed. Consider using it for debugging purposes only.
## Using FlauBERT with Hugging Face's Transformers
```python
import torch
from transformers import FlaubertModel, FlaubertTokenizer
# Choose among ['flaubert/flaubert_small_cased', 'flaubert/flaubert_base_uncased',
# 'flaubert/flaubert_base_cased', 'flaubert/flaubert_large_cased']
modelname = 'flaubert/flaubert_base_cased'
# Load pretrained model and tokenizer
flaubert, log = FlaubertModel.from_pretrained(modelname, output_loading_info=True)
flaubert_tokenizer = FlaubertTokenizer.from_pretrained(modelname, do_lowercase=False)
# do_lowercase=False if using cased models, True if using uncased ones
sentence = "Le chat mange une pomme."
token_ids = torch.tensor([flaubert_tokenizer.encode(sentence)])
last_layer = flaubert(token_ids)[0]
print(last_layer.shape)
# torch.Size([1, 8, 768]) -> (batch size x number of tokens x embedding dimension)
# The BERT [CLS] token corresponds to the first hidden state of the last layer
cls_embedding = last_layer[:, 0, :]
```
**Notes:** if your `transformers` version is <=2.10.0, `modelname` should take one
of the following values:
```
['flaubert-small-cased', 'flaubert-base-uncased', 'flaubert-base-cased', 'flaubert-large-cased']
```
## References
If you use FlauBERT or the FLUE Benchmark for your scientific publication, or if you find the resources in this repository useful, please cite one of the following papers:
[LREC paper](http://www.lrec-conf.org/proceedings/lrec2020/pdf/2020.lrec-1.302.pdf)
```
@InProceedings{le2020flaubert,
author = {Le, Hang and Vial, Lo\"{i}c and Frej, Jibril and Segonne, Vincent and Coavoux, Maximin and Lecouteux, Benjamin and Allauzen, Alexandre and Crabb\'{e}, Beno\^{i}t and Besacier, Laurent and Schwab, Didier},
title = {FlauBERT: Unsupervised Language Model Pre-training for French},
booktitle = {Proceedings of The 12th Language Resources and Evaluation Conference},
month = {May},
year = {2020},
address = {Marseille, France},
publisher = {European Language Resources Association},
pages = {2479--2490},
url = {https://www.aclweb.org/anthology/2020.lrec-1.302}
}
```
[TALN paper](https://hal.archives-ouvertes.fr/hal-02784776/)
```
@inproceedings{le2020flaubert,
title = {FlauBERT: des mod{\`e}les de langue contextualis{\'e}s pr{\'e}-entra{\^\i}n{\'e}s pour le fran{\c{c}}ais},
author = {Le, Hang and Vial, Lo{\"\i}c and Frej, Jibril and Segonne, Vincent and Coavoux, Maximin and Lecouteux, Benjamin and Allauzen, Alexandre and Crabb{\'e}, Beno{\^\i}t and Besacier, Laurent and Schwab, Didier},
booktitle = {Actes de la 6e conf{\'e}rence conjointe Journ{\'e}es d'{\'E}tudes sur la Parole (JEP, 31e {\'e}dition), Traitement Automatique des Langues Naturelles (TALN, 27e {\'e}dition), Rencontre des {\'E}tudiants Chercheurs en Informatique pour le Traitement Automatique des Langues (R{\'E}CITAL, 22e {\'e}dition). Volume 2: Traitement Automatique des Langues Naturelles},
pages = {268--278},
year = {2020},
organization = {ATALA}
}
``` |
flaubert/flaubert_large_cased | 2021-05-19T16:55:50.000Z | [
"pytorch",
"flaubert",
"lm-head",
"masked-lm",
"fr",
"dataset:flaubert",
"transformers",
"license:mit",
"bert",
"language-model",
"flue",
"french",
"bert-large",
"flaubert-large",
"cased",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"README.md",
"config.json",
"merges.txt",
"pytorch_model.bin",
"tokenizer_config.json",
"vocab.json"
]
| flaubert | 12,781 | transformers | ---
language: fr
license: mit
datasets:
- flaubert
metrics:
- flue
tags:
- bert
- language-model
- flaubert
- flue
- french
- bert-large
- flaubert-large
- cased
---
# FlauBERT: Unsupervised Language Model Pre-training for French
**FlauBERT** is a French BERT trained on a very large and heterogeneous French corpus. Models of different sizes are trained using the new CNRS (French National Centre for Scientific Research) [Jean Zay](http://www.idris.fr/eng/jean-zay/ ) supercomputer.
Along with FlauBERT comes [**FLUE**](https://github.com/getalp/Flaubert/tree/master/flue): an evaluation setup for French NLP systems similar to the popular GLUE benchmark. The goal is to enable further reproducible experiments in the future and to share models and progress on the French language. For more details, please refer to the [official website](https://github.com/getalp/Flaubert).
## FlauBERT models
| Model name | Number of layers | Attention Heads | Embedding Dimension | Total Parameters |
| :------: | :---: | :---: | :---: | :---: |
| `flaubert-small-cased` | 6 | 8 | 512 | 54 M |
| `flaubert-base-uncased` | 12 | 12 | 768 | 137 M |
| `flaubert-base-cased` | 12 | 12 | 768 | 138 M |
| `flaubert-large-cased` | 24 | 16 | 1024 | 373 M |
**Note:** `flaubert-small-cased` is partially trained, so performance is not guaranteed. Consider using it for debugging purposes only.
## Using FlauBERT with Hugging Face's Transformers
```python
import torch
from transformers import FlaubertModel, FlaubertTokenizer
# Choose among ['flaubert/flaubert_small_cased', 'flaubert/flaubert_base_uncased',
# 'flaubert/flaubert_base_cased', 'flaubert/flaubert_large_cased']
modelname = 'flaubert/flaubert_base_cased'
# Load pretrained model and tokenizer
flaubert, log = FlaubertModel.from_pretrained(modelname, output_loading_info=True)
flaubert_tokenizer = FlaubertTokenizer.from_pretrained(modelname, do_lowercase=False)
# do_lowercase=False if using cased models, True if using uncased ones
sentence = "Le chat mange une pomme."
token_ids = torch.tensor([flaubert_tokenizer.encode(sentence)])
last_layer = flaubert(token_ids)[0]
print(last_layer.shape)
# torch.Size([1, 8, 768]) -> (batch size x number of tokens x embedding dimension)
# The BERT [CLS] token corresponds to the first hidden state of the last layer
cls_embedding = last_layer[:, 0, :]
```
**Notes:** if your `transformers` version is <=2.10.0, `modelname` should take one
of the following values:
```
['flaubert-small-cased', 'flaubert-base-uncased', 'flaubert-base-cased', 'flaubert-large-cased']
```
## References
If you use FlauBERT or the FLUE Benchmark for your scientific publication, or if you find the resources in this repository useful, please cite one of the following papers:
[LREC paper](http://www.lrec-conf.org/proceedings/lrec2020/pdf/2020.lrec-1.302.pdf)
```
@InProceedings{le2020flaubert,
author = {Le, Hang and Vial, Lo\"{i}c and Frej, Jibril and Segonne, Vincent and Coavoux, Maximin and Lecouteux, Benjamin and Allauzen, Alexandre and Crabb\'{e}, Beno\^{i}t and Besacier, Laurent and Schwab, Didier},
title = {FlauBERT: Unsupervised Language Model Pre-training for French},
booktitle = {Proceedings of The 12th Language Resources and Evaluation Conference},
month = {May},
year = {2020},
address = {Marseille, France},
publisher = {European Language Resources Association},
pages = {2479--2490},
url = {https://www.aclweb.org/anthology/2020.lrec-1.302}
}
```
[TALN paper](https://hal.archives-ouvertes.fr/hal-02784776/)
```
@inproceedings{le2020flaubert,
title = {FlauBERT: des mod{\`e}les de langue contextualis{\'e}s pr{\'e}-entra{\^\i}n{\'e}s pour le fran{\c{c}}ais},
author = {Le, Hang and Vial, Lo{\"\i}c and Frej, Jibril and Segonne, Vincent and Coavoux, Maximin and Lecouteux, Benjamin and Allauzen, Alexandre and Crabb{\'e}, Beno{\^\i}t and Besacier, Laurent and Schwab, Didier},
booktitle = {Actes de la 6e conf{\'e}rence conjointe Journ{\'e}es d'{\'E}tudes sur la Parole (JEP, 31e {\'e}dition), Traitement Automatique des Langues Naturelles (TALN, 27e {\'e}dition), Rencontre des {\'E}tudiants Chercheurs en Informatique pour le Traitement Automatique des Langues (R{\'E}CITAL, 22e {\'e}dition). Volume 2: Traitement Automatique des Langues Naturelles},
pages = {268--278},
year = {2020},
organization = {ATALA}
}
``` |
flaubert/flaubert_small_cased | 2021-05-19T16:56:07.000Z | [
"pytorch",
"flaubert",
"lm-head",
"masked-lm",
"fr",
"dataset:flaubert",
"transformers",
"license:mit",
"bert",
"language-model",
"flue",
"french",
"flaubert-small",
"cased",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"README.md",
"config.json",
"merges.txt",
"pytorch_model.bin",
"tokenizer_config.json",
"vocab.json"
]
| flaubert | 6,467 | transformers | ---
language: fr
license: mit
datasets:
- flaubert
metrics:
- flue
tags:
- bert
- language-model
- flaubert
- flue
- french
- flaubert-small
- cased
---
# FlauBERT: Unsupervised Language Model Pre-training for French
**FlauBERT** is a French BERT trained on a very large and heterogeneous French corpus. Models of different sizes are trained using the new CNRS (French National Centre for Scientific Research) [Jean Zay](http://www.idris.fr/eng/jean-zay/ ) supercomputer.
Along with FlauBERT comes [**FLUE**](https://github.com/getalp/Flaubert/tree/master/flue): an evaluation setup for French NLP systems similar to the popular GLUE benchmark. The goal is to enable further reproducible experiments in the future and to share models and progress on the French language. For more details, please refer to the [official website](https://github.com/getalp/Flaubert).
## FlauBERT models
| Model name | Number of layers | Attention Heads | Embedding Dimension | Total Parameters |
| :------: | :---: | :---: | :---: | :---: |
| `flaubert-small-cased` | 6 | 8 | 512 | 54 M |
| `flaubert-base-uncased` | 12 | 12 | 768 | 137 M |
| `flaubert-base-cased` | 12 | 12 | 768 | 138 M |
| `flaubert-large-cased` | 24 | 16 | 1024 | 373 M |
**Note:** `flaubert-small-cased` is partially trained, so performance is not guaranteed. Consider using it for debugging purposes only.
## Using FlauBERT with Hugging Face's Transformers
```python
import torch
from transformers import FlaubertModel, FlaubertTokenizer
# Choose among ['flaubert/flaubert_small_cased', 'flaubert/flaubert_base_uncased',
# 'flaubert/flaubert_base_cased', 'flaubert/flaubert_large_cased']
modelname = 'flaubert/flaubert_base_cased'
# Load pretrained model and tokenizer
flaubert, log = FlaubertModel.from_pretrained(modelname, output_loading_info=True)
flaubert_tokenizer = FlaubertTokenizer.from_pretrained(modelname, do_lowercase=False)
# do_lowercase=False if using cased models, True if using uncased ones
sentence = "Le chat mange une pomme."
token_ids = torch.tensor([flaubert_tokenizer.encode(sentence)])
last_layer = flaubert(token_ids)[0]
print(last_layer.shape)
# torch.Size([1, 8, 768]) -> (batch size x number of tokens x embedding dimension)
# The BERT [CLS] token corresponds to the first hidden state of the last layer
cls_embedding = last_layer[:, 0, :]
```
**Notes:** if your `transformers` version is <=2.10.0, `modelname` should take one
of the following values:
```
['flaubert-small-cased', 'flaubert-base-uncased', 'flaubert-base-cased', 'flaubert-large-cased']
```
## References
If you use FlauBERT or the FLUE Benchmark for your scientific publication, or if you find the resources in this repository useful, please cite one of the following papers:
[LREC paper](http://www.lrec-conf.org/proceedings/lrec2020/pdf/2020.lrec-1.302.pdf)
```
@InProceedings{le2020flaubert,
author = {Le, Hang and Vial, Lo\"{i}c and Frej, Jibril and Segonne, Vincent and Coavoux, Maximin and Lecouteux, Benjamin and Allauzen, Alexandre and Crabb\'{e}, Beno\^{i}t and Besacier, Laurent and Schwab, Didier},
title = {FlauBERT: Unsupervised Language Model Pre-training for French},
booktitle = {Proceedings of The 12th Language Resources and Evaluation Conference},
month = {May},
year = {2020},
address = {Marseille, France},
publisher = {European Language Resources Association},
pages = {2479--2490},
url = {https://www.aclweb.org/anthology/2020.lrec-1.302}
}
```
[TALN paper](https://hal.archives-ouvertes.fr/hal-02784776/)
```
@inproceedings{le2020flaubert,
title = {FlauBERT: des mod{\`e}les de langue contextualis{\'e}s pr{\'e}-entra{\^\i}n{\'e}s pour le fran{\c{c}}ais},
author = {Le, Hang and Vial, Lo{\"\i}c and Frej, Jibril and Segonne, Vincent and Coavoux, Maximin and Lecouteux, Benjamin and Allauzen, Alexandre and Crabb{\'e}, Beno{\^\i}t and Besacier, Laurent and Schwab, Didier},
booktitle = {Actes de la 6e conf{\'e}rence conjointe Journ{\'e}es d'{\'E}tudes sur la Parole (JEP, 31e {\'e}dition), Traitement Automatique des Langues Naturelles (TALN, 27e {\'e}dition), Rencontre des {\'E}tudiants Chercheurs en Informatique pour le Traitement Automatique des Langues (R{\'E}CITAL, 22e {\'e}dition). Volume 2: Traitement Automatique des Langues Naturelles},
pages = {268--278},
year = {2020},
organization = {ATALA}
}
``` |
fleek/wav2vec-large-xlsr-korean | 2021-06-08T08:05:41.000Z | [
"pytorch",
"wav2vec2",
"transformers"
]
| [
".gitattributes",
"config.json",
"optimizer.pt",
"preprocessor_config.json",
"pytorch_model.bin",
"scheduler.pt",
"special_tokens_map.config",
"tokenizer.config",
"trainer_state.json",
"training_args.bin",
"vocab.json"
]
| fleek | 11 | transformers | ||
flexudy/t5-base-multi-sentence-doctor | 2020-12-11T23:33:25.000Z | [
"pytorch",
"tf",
"t5",
"seq2seq",
"transformers",
"text2text-generation"
]
| text2text-generation | [
".gitattributes",
"README.md",
"config.json",
"pytorch_model.bin",
"sent-banner.png",
"special_tokens_map.json",
"spiece.model",
"tf_model.h5",
"tokenizer_config.json"
]
| flexudy | 1,563 | transformers | 
# Sentence-Doctor
Sentence doctor is a T5 model that attempts to correct the errors or mistakes found in sentences. The model works on English, German and French text.
## 1. Problem:
Many NLP models depend on tasks like *Text Extraction Libraries, OCR, Speech to Text libraries* and **Sentence Boundary Detection**.
As a consequence, errors caused by these tasks in your NLP pipeline can affect the quality of models in applications, especially since models are often trained on **clean** input.
## 2. Solution:
Here we provide a model that **attempts** to reconstruct sentences based on its context (surrounding text). The task is pretty straightforward:
* `Given an "erroneous" sentence, and its context, reconstruct the "intended" sentence`.
## 3. Use Cases:
* Attempt to repair noisy sentences that were extracted with OCR software or text extractors.
* Attempt to repair sentence boundaries.
* Example (in German): **Input: "und ich bin im**",
* Prefix_Context: "Hallo! Mein Name ist John", Postfix_Context: "Januar 1990 geboren."
* Output: "John und ich bin im Jahr 1990 geboren"
* Possibly sentence level spelling correction -- Although this is not the intended use.
* Input: "I went to church **las yesteday**" => Output: "I went to church last Sunday".
## 4. Disclaimer
Note how we always emphasize the word *attempt*. The current version of the model was only trained on **150K** sentences from the tatoeba dataset: https://tatoeba.org/eng. (50K per language -- En, Fr, De).
Hence, we strongly encourage you to finetune the model on your dataset. We might release a version trained on more data.
## 5. Datasets
We generated synthetic data from the tatoeba dataset (https://tatoeba.org/eng) by randomly applying different transformations to words and characters based on some probabilities. The datasets are available in the data folder (where **sentence_doctor_dataset_300K** is a larger dataset with 100K sentences for each language).
## 6. Usage
### 6.1 Preprocessing
* Let us assume we have the following text (Note that there are no punctuation marks in the text):
```python
text = "That is my job I am a medical doctor I save lives"
```
* You decided to extract the sentences, and for some obscure reason you obtained these sentences:
```python
sentences = ["That is my job I a", "m a medical doct", "or I save lives"]
```
* You now wish to correct the sentence **"m a medical doct"**.
Here is the single preprocessing step for the model:
```python
input_text = "repair_sentence: " + sentences[1] + " context: {" + sentences[0] + "}{" + sentences[2] + "} </s>"
```
**Explanation**:<br/>
* We are telling the model to repair the sentence with the prefix "repair_sentence: "
* Then we append the sentence we want to repair, **sentences[1]**, which is "m a medical doct"
* Next we give some context to the model. In this case, the context is some text that occurred before the sentence and some text that appeared after the sentence in the original text.
* To do that, we append the keyword "context :"
* Append **{sentences[0]}** "{That is my job I a}". (Note how it is surrounded by curly braces).
* Append **{sentences[2]}** "{or I save lives}".
* Finally, we tell the model this is the end of the input with </s>.
```python
print(input_text) # repair_sentence: m a medical doct context: {That is my job I a}{or I save lives} </s>
```
<br/>
**The context is optional**, so the input could also be ```repair_sentence: m a medical doct context: {}{} </s>```
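If you need to repair several sentences in a row, a small helper like the one below (a sketch, not part of the model card) can build the model input for any sentence in a list, using its neighbours as context:
```python
def build_repair_input(sentences, index):
    # use the previous and next sentences as context, or empty strings at the edges
    prefix = sentences[index - 1] if index > 0 else ""
    postfix = sentences[index + 1] if index + 1 < len(sentences) else ""
    return f"repair_sentence: {sentences[index]} context: {{{prefix}}}{{{postfix}}} </s>"

sentences = ["That is my job I a", "m a medical doct", "or I save lives"]
print(build_repair_input(sentences, 1))
# repair_sentence: m a medical doct context: {That is my job I a}{or I save lives} </s>
```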
### 6.2 Inference
```python
from transformers import AutoTokenizer, AutoModelWithLMHead
tokenizer = AutoTokenizer.from_pretrained("flexudy/t5-base-multi-sentence-doctor")
model = AutoModelWithLMHead.from_pretrained("flexudy/t5-base-multi-sentence-doctor")
input_text = "repair_sentence: m a medical doct context: {That is my job I a}{or I save lives} </s>"
input_ids = tokenizer.encode(input_text, return_tensors="pt")
outputs = model.generate(input_ids, max_length=32, num_beams=1)
sentence = tokenizer.decode(outputs[0], skip_special_tokens=True, clean_up_tokenization_spaces=True)
assert sentence == "I am a medical doctor."
```
## 7. Fine-tuning
We also provide a script `train_any_t5_task.py` that might help you fine-tune any text2text task with T5. We added #TODO comments throughout to help you train with ease. For example:
```python
# TODO Set your training epochs
config.TRAIN_EPOCHS = 3
```
If you don't want to read the #TODO comments, just pass in your data like this:
```python
# TODO Where is your data ? Enter the path
trainer.start("data/sentence_doctor_dataset_300.csv")
```
and voilà! Please feel free to correct any mistakes in the code and open a pull request.
## 8. Attribution
* [Huggingface](https://huggingface.co/) transformer lib for making this possible
* Abhishek Kumar Mishra's transformer [tutorial](https://github.com/abhimishra91/transformers-tutorials/blob/master/transformers_summarization_wandb.ipynb) on text summarisation. Our training code is just a modified version of their code. So many thanks.
* We finetuned this model from the huggingface hub: WikinewsSum/t5-base-multi-combine-wiki-news. Thanks to the [authors](https://huggingface.co/WikinewsSum)
* We also read a lot of work from [Suraj Patil](https://github.com/patil-suraj)
* No one has been forgotten, hopefully :)
|
flexudy/t5-small-wav2vec2-grammar-fixer | 2021-02-16T01:56:40.000Z | [
"pytorch",
"tf"
]
| [
".gitattributes",
"README.md",
"config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"spiece.model",
"tf_model.h5",
"tokenizer_config.json"
]
| flexudy | 266 | # flexudy/t5-small-wav2vec2-grammar-fixer
After transcribing your audio with Wav2Vec2, you might be interested in a post-processor that restores casing and punctuation.
All training paragraphs had at most 128 tokens (separated by whitespace).
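Since the model only saw paragraphs of up to 128 whitespace-separated tokens, longer transcripts should probably be split into chunks of that size first. A minimal sketch (only the 128-token limit comes from this card; the splitting strategy itself is an assumption):
```python
def chunk_transcript(transcript: str, max_tokens: int = 128):
    """Split a long transcript into whitespace-token chunks of at most max_tokens."""
    tokens = transcript.split()
    return [" ".join(tokens[i:i + max_tokens]) for i in range(0, len(tokens), max_tokens)]
```
The model itself can then be used as follows: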
```python
from transformers import T5Tokenizer, T5ForConditionalGeneration
model_name = "flexudy/t5-small-wav2vec2-grammar-fixer"
tokenizer = T5Tokenizer.from_pretrained(model_name)
model = T5ForConditionalGeneration.from_pretrained(model_name)
sent = """GOING ALONG SLUSHY COUNTRY ROADS AND SPEAKING TO DAMP AUDIENCES IN DRAUGHTY SCHOOL ROOMS DAY AFTER DAY FOR A FORTNIGHT HE'LL HAVE TO PUT IN AN APPEARANCE AT SOME PLACE OF WORSHIP ON SUNDAY MORNING AND HE CAN COME TO US IMMEDIATELY AFTERWARDS"""
input_text = "fix: { " + sent + " } </s>"
input_ids = tokenizer.encode(input_text, return_tensors="pt", max_length=256, truncation=True, add_special_tokens=True)
outputs = model.generate(
input_ids=input_ids,
max_length=256,
num_beams=4,
repetition_penalty=1.0,
length_penalty=1.0,
early_stopping=True
)
sentence = tokenizer.decode(outputs[0], skip_special_tokens=True, clean_up_tokenization_spaces=True)
print(f"{sentence}")
```
INPUT 1:
```
WHEN ARE YOU COMING TOMORROW I AM ASKING BECAUSE OF THE MONEY YOU OWE ME PLEASE GIVE IT TO ME I AM WAITING YOU HAVE BEEN AVOIDING ME SINCE TWO THOUSAND AND THREE
```
OUTPUT 1:
```
When are you coming tomorrow? I am asking because of the money you owe me, please give it to me. I am waiting. You have been avoiding me since 2003.
```
INPUT 2:
```
GOING ALONG SLUSHY COUNTRY ROADS AND SPEAKING TO DAMP AUDIENCES IN DRAUGHTY SCHOOL ROOMS DAY AFTER DAY FOR A FORTNIGHT HE'LL HAVE TO PUT IN AN APPEARANCE AT SOME PLACE OF WORSHIP ON SUNDAY MORNING AND HE CAN COME TO US IMMEDIATELY AFTERWARDS
```
OUTPUT 2:
```
Going along Slushy Country Roads and speaking to Damp audiences in Draughty School rooms day after day for a fortnight, he'll have to put in an appearance at some place of worship on Sunday morning and he can come to us immediately afterwards.
```
I strongly recommend improving performance via further fine-tuning or by training on more examples.
- Possible quick rule-based improvement: align the transcribed version with the generated version. If the similarity of two aligned words (case-insensitive) falls below some threshold under a similarity metric (e.g. Levenshtein), keep the transcribed word.
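A minimal sketch of that heuristic, assuming the two versions align word-for-word and using `difflib`'s ratio as the similarity metric (the 0.5 threshold and the metric are illustrative choices, not tested settings):
```python
from difflib import SequenceMatcher

def merge(transcribed: str, generated: str, threshold: float = 0.5) -> str:
    """Keep a generated word only if it is similar enough to the aligned transcribed word."""
    merged = []
    for t_word, g_word in zip(transcribed.split(), generated.split()):
        similarity = SequenceMatcher(None, t_word.lower(), g_word.lower()).ratio()
        merged.append(g_word if similarity >= threshold else t_word)
    return " ".join(merged)

print(merge("THE CAT SAT DOWN", "The dog sat down."))  # 'The CAT sat down.'
```
|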
||
florinotto/123 | 2021-05-08T14:26:03.000Z | []
| [
".gitattributes"
]
| florinotto | 0 | |||
flozi00/byt5-german-grammar | 2021-06-07T09:28:37.000Z | [
"pytorch",
"t5",
"seq2seq",
"de",
"transformers",
"grammar",
"text2text-generation"
]
| text2text-generation | [
".gitattributes",
"README.md",
"config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json"
]
| flozi00 | 275 | transformers | ---
language: de
tags:
- grammar
widget:
- text: "correct german grammar: es ist schön so viele tolle menschen um sich zu haben denn ohne sie wäre es nicht so schön"
---
Example outputs:
- input: ich liebe das leben --> output: Ich liebe das Leben.
- input: es ist schön so viele tolle menschen um sich zu haben denn ohne sie wäre es nicht so schön --> output: Es ist schön, so viele tolle Menschen, um sich zu haben, denn ohne sie wäre es nicht so schön.
- input: der kunde hat ausdrücklich nach dirk verlangt weil er den rabatt haben möchte --> output: Der Kunde hat ausdrücklich nach Dirk verlangt, weil er den Rabatt haben möchte.
The data can be prepared like this; `broken_text` is used as the input, while `text` is the output:
```python
import re
import phonetics
import random

chars_to_ignore_regex = "[^A-Za-z0-9\ö\ä\ü\Ö\Ä\Ü\ß\-,;.:?! ]+"
broken_chars_to_ignore_regex = "[^A-Za-z0-9\ö\ä\ü\Ö\Ä\Ü\ß\- ]+"

def do_manipulation(string):
    # The clean target keeps punctuation; the broken input is lowercased and stripped of it.
    text = re.sub(chars_to_ignore_regex, '', string)
    broken_text = re.sub(broken_chars_to_ignore_regex, "", text.lower())
    # With 50% probability, additionally try to corrupt up to a quarter of the words.
    if(random.randint(0, 100) >= 50):
        for _ in range(int(len(broken_text.split(" ")) / 4)):
            if(random.randint(0, 100) > 30):
                randc = random.choice(broken_text.split(" "))
                if(random.randint(0, 10) > 4):
                    # Replace the word with random lowercase letters of the same length.
                    broken_text = broken_text.replace(randc, ''.join(random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(len(randc))).lower())
                else:
                    # Replace the word with its (lowercased) Metaphone phonetic encoding.
                    broken_text = broken_text.replace(randc, phonetics.metaphone(randc).lower())
    return text, broken_text
```
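For inference, the widget at the top of this card uses the prefix `correct german grammar: `; a hedged sketch along those lines (the generation parameters are assumptions, not documented settings):
```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_name = "flozi00/byt5-german-grammar"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

text = "correct german grammar: der kunde hat ausdrücklich nach dirk verlangt"
input_ids = tokenizer(text, return_tensors="pt").input_ids
outputs = model.generate(input_ids, max_length=256)  # assumed generation settings
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```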
|
flozi00/marian-german-grammar | 2021-06-13T14:56:45.000Z | [
"pytorch",
"marian",
"seq2seq",
"transformers",
"text2text-generation"
]
| text2text-generation | [
".gitattributes",
"all_results.json",
"config.json",
"pytorch_model.bin",
"rng_state.pth",
"scheduler.pt",
"source.spm",
"special_tokens_map.json",
"target.spm",
"tokenizer_config.json",
"train_results.json",
"trainer_state.json",
"training_args.bin",
"vocab.json"
]
| flozi00 | 76 | transformers | |
flozi00/wav2vec-xlsr-german | 2021-03-29T15:46:48.000Z | [
"pytorch",
"wav2vec2",
"de",
"dataset:common_voice",
"transformers",
"audio",
"automatic-speech-recognition",
"speech",
"xlsr-fine-tuning-week",
"license:apache-2.0"
]
| automatic-speech-recognition | [
".gitattributes",
"README.md",
"config.json",
"optimizer.pt",
"preprocessor_config.json",
"pytorch_model.bin",
"scheduler.pt",
"trainer_state.json",
"training_args.bin",
"vocab.json",
".vscode/settings.json"
]
| flozi00 | 481 | transformers | ---
language: de
datasets:
- common_voice
tags:
- audio
- automatic-speech-recognition
- speech
- xlsr-fine-tuning-week
license: apache-2.0
model-index:
- name: XLSR Wav2Vec2 German by Florian Zimmermeister
results:
- task:
name: Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice de
type: common_voice
args: de
metrics:
- name: Test WER
type: wer
value: 15.36
---
# Wav2Vec2-Large-XLSR-53-German
Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on German using the [Common Voice](https://huggingface.co/datasets/common_voice) dataset.
When using this model, make sure that your speech input is sampled at 16kHz.
## Usage
The model can be used directly (without a language model) as follows:
```python
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
test_dataset = load_dataset("common_voice", "de", split="test[:2%]")
processor = Wav2Vec2Processor.from_pretrained("flozi00/wav2vec-xlsr-german")
model = Wav2Vec2ForCTC.from_pretrained("flozi00/wav2vec-xlsr-german")
resampler = torchaudio.transforms.Resample(48_000, 16_000)
# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
speech_array, sampling_rate = torchaudio.load(batch["path"])
batch["speech"] = resampler(speech_array).squeeze().numpy()
return batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
predicted_ids = torch.argmax(logits, dim=-1)
print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
```
## Evaluation
The model can be evaluated as follows on the German test data of Common Voice.
```python
import datasets
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import soundfile as sf
import torch
from jiwer import wer
import librosa
import re
import json
import time
chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\%\‘\”\�]'  # noqa: W605
pattern = re.compile("[A-Za-z0-9 ]+")
#try:
# os.remove("dataset.json")
#except:
# pass
modelName = "flozi00/wav2vec-xlsr-german"
model = Wav2Vec2ForCTC.from_pretrained(modelName).to("cuda")
tokenizer = Wav2Vec2Processor.from_pretrained(modelName)
print("loading and cleaning")
val_dataset = datasets.load_dataset("common_voice","de", split="validation")
val_dataset = val_dataset.remove_columns(["client_id","up_votes","down_votes","age","gender","accent","locale","segment"])
val_dataset = val_dataset.rename_column("path","file")
val_dataset = val_dataset.rename_column("sentence","text")
#val_dataset = val_dataset.filter(lambda example, indice: indice % 30 == 0, with_indices=True)
print(val_dataset)
def map_to_array(batch):
batch["text"] = re.sub(chars_to_ignore_regex, '', batch["text"]).upper() + " "
try:
speech_array, sampling_rate = sf.read(batch["file"]+ ".wav")
except:
speech_array, sampling_rate = librosa.load(batch["file"], sr=16000, res_type='kaiser_fast')
sf.write(batch["file"] + ".wav", speech_array, sampling_rate, subtype='PCM_24')
batch["speech"] = speech_array
return batch
print("reading audio")
start = time.time()
val_dataset = val_dataset.map(map_to_array)
print(time.time()-start)
val_dataset = val_dataset.filter(lambda example: pattern.fullmatch(example["text"]) is not None)
def map_to_pred(batch):
inputs = tokenizer(batch["speech"], return_tensors="pt", padding="longest")
input_values = inputs.input_values.to("cuda")
attention_mask = inputs.attention_mask.to("cuda")
with torch.no_grad():
logits = model(input_values, attention_mask=attention_mask).logits
predicted_ids = torch.argmax(logits, dim=-1)
transcription = tokenizer.batch_decode(predicted_ids)
batch["transcription"] = transcription
return batch
print("predicting")
result = val_dataset.map(map_to_pred, batched=True, batch_size=32, remove_columns=["speech"])
print("WER:", wer(result["text"], result["transcription"]))
```
**Test Result**: 15.36 %
|
fmikaelian/camembert-base-fquad | 2020-12-11T21:40:08.000Z | [
"pytorch",
"camembert",
"question-answering",
"fr",
"transformers"
]
| question-answering | [
".gitattributes",
"README.md",
"config.json",
"pytorch_model.bin",
"sentencepiece.bpe.model",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin"
]
| fmikaelian | 17,045 | transformers | ---
language: fr
---
# camembert-base-fquad
## Description
A baseline model for question answering in French (a [CamemBERT](https://camembert-model.fr/) model fine-tuned on [FQuAD](https://fquad.illuin.tech/))
## Training hyperparameters
```shell
python3 ./examples/question-answering/run_squad.py \
--model_type camembert \
--model_name_or_path camembert-base \
--do_train \
--do_eval \
--do_lower_case \
--train_file train.json \
--predict_file valid.json \
--learning_rate 3e-5 \
--num_train_epochs 2 \
--max_seq_length 384 \
--doc_stride 128 \
--output_dir output \
--per_gpu_eval_batch_size=3 \
--per_gpu_train_batch_size=3 \
--save_steps 10000
```
## Evaluation results
```shell
{"f1": 77.24515316052342, "exact_match": 52.82308657465496}
```
## Usage
```python
from transformers import pipeline
nlp = pipeline('question-answering', model='fmikaelian/camembert-base-fquad', tokenizer='fmikaelian/camembert-base-fquad')
nlp({
'question': "Qui est Claude Monet?",
'context': "Claude Monet, né le 14 novembre 1840 à Paris et mort le 5 décembre 1926 à Giverny, est un peintre français et l’un des fondateurs de l'impressionnisme."
})
``` |
fmikaelian/camembert-base-squad | 2020-12-11T21:40:12.000Z | [
"pytorch",
"camembert",
"question-answering",
"fr",
"transformers"
]
| question-answering | [
".gitattributes",
"README.md",
"config.json",
"pytorch_model.bin",
"sentencepiece.bpe.model",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin"
]
| fmikaelian | 14,311 | transformers | ---
language: fr
---
# camembert-base-squad
## Description
A baseline model for question answering in French (a [CamemBERT](https://camembert-model.fr/) model fine-tuned on the [French-translated SQuAD 1.1 dataset](https://github.com/Alikabbadj/French-SQuAD))
## Training hyperparameters
```shell
python3 ./examples/question-answering/run_squad.py \
--model_type camembert \
--model_name_or_path camembert-base \
--do_train \
--do_eval \
--do_lower_case \
--train_file SQuAD-v1.1-train_fr_ss999_awstart2_net.json \
--predict_file SQuAD-v1.1-dev_fr_ss999_awstart2_net.json \
--learning_rate 3e-5 \
--num_train_epochs 2 \
--max_seq_length 384 \
--doc_stride 128 \
--output_dir output3 \
--per_gpu_eval_batch_size=3 \
--per_gpu_train_batch_size=3 \
--save_steps 10000
```
## Evaluation results
```shell
{"f1": 79.8570684959745, "exact_match": 59.21327108373895}
```
## Usage
```python
from transformers import pipeline
nlp = pipeline('question-answering', model='fmikaelian/camembert-base-squad', tokenizer='fmikaelian/camembert-base-squad')
nlp({
'question': "Qui est Claude Monet?",
'context': "Claude Monet, né le 14 novembre 1840 à Paris et mort le 5 décembre 1926 à Giverny, est un peintre français et l’un des fondateurs de l'impressionnisme."
})
``` |
fmikaelian/flaubert-base-uncased-squad | 2020-12-11T21:40:15.000Z | [
"pytorch",
"flaubert",
"question-answering",
"fr",
"transformers"
]
| question-answering | [
".gitattributes",
"README.md",
"config.json",
"merges.txt",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"training_args.bin",
"vocab.json"
]
| fmikaelian | 270 | transformers | ---
language: fr
---
# flaubert-base-uncased-squad
## Description
A baseline model for question answering in French (a [FlauBERT](https://github.com/getalp/Flaubert) model fine-tuned on the [French-translated SQuAD 1.1 dataset](https://github.com/Alikabbadj/French-SQuAD))
## Training hyperparameters
```shell
python3 ./examples/question-answering/run_squad.py \
--model_type flaubert \
--model_name_or_path flaubert-base-uncased \
--do_train \
--do_eval \
--do_lower_case \
--train_file SQuAD-v1.1-train_fr_ss999_awstart2_net.json \
--predict_file SQuAD-v1.1-dev_fr_ss999_awstart2_net.json \
--learning_rate 3e-5 \
--num_train_epochs 2 \
--max_seq_length 384 \
--doc_stride 128 \
--output_dir output \
--per_gpu_eval_batch_size=3 \
--per_gpu_train_batch_size=3
```
## Evaluation results
```shell
{"f1": 68.66174806561969, "exact_match": 49.299692063176714}
```
## Usage
```python
from transformers import pipeline
nlp = pipeline('question-answering', model='fmikaelian/flaubert-base-uncased-squad', tokenizer='fmikaelian/flaubert-base-uncased-squad')
nlp({
'question': "Qui est Claude Monet?",
'context': "Claude Monet, né le 14 novembre 1840 à Paris et mort le 5 décembre 1926 à Giverny, est un peintre français et l’un des fondateurs de l'impressionnisme."
})
``` |
formermagic/bart-base-python-1m | 2021-02-06T11:13:00.000Z | [
"pytorch",
"bart",
"seq2seq",
"py",
"transformers",
"license:mit",
"text2text-generation"
]
| text2text-generation | [
".gitattributes",
"README.md",
"config.json",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer.json",
"tokenizer_config.json"
]
| formermagic | 12 | transformers | ---
license: mit
language: py
thumbnail: https://avatars.githubusercontent.com/u/70610668?s=400&u=f0699303289113c125e8686338739d9a63d5826c&v=4
tags:
- bart
- pytorch
---
# bart-base-python-1m |
formermagic/roberta-base-python-1m | 2021-05-20T16:19:17.000Z | [
"pytorch",
"jax",
"roberta",
"masked-lm",
"py",
"transformers",
"license:mit",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"README.md",
"config.json",
"flax_model.msgpack",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer.json",
"tokenizer_config.json"
]
| formermagic | 7 | transformers | ---
license: mit
language: py
thumbnail: https://avatars.githubusercontent.com/u/70610668?s=400&u=f0699303289113c125e8686338739d9a63d5826c&v=4
tags:
- roberta
- pytorch
---
# roberta-base-python-1m |
formu/DR-Site | 2021-03-26T15:34:21.000Z | []
| [
".gitattributes",
"README.md"
]
| formu | 0 | https://www.geogebra.org/m/w8uzjttg
https://www.geogebra.org/m/gvn7m78g
https://www.geogebra.org/m/arxecanq
https://www.geogebra.org/m/xb69bvww
https://www.geogebra.org/m/apvepfnd
https://www.geogebra.org/m/evmj8ckk
https://www.geogebra.org/m/qxcxwmhp
https://www.geogebra.org/m/p3cxqh6c
https://www.geogebra.org/m/ggrahbgd
https://www.geogebra.org/m/pnhymrbc
https://www.geogebra.org/m/zjukbtk9
https://www.geogebra.org/m/bbezun8r
https://www.geogebra.org/m/sgwamtru
https://www.geogebra.org/m/fpunkxxp
https://www.geogebra.org/m/acxebrr7 |
||
fran-martinez/scibert_scivocab_cased_ner_jnlpba | 2021-05-19T16:56:50.000Z | [
"pytorch",
"jax",
"bert",
"token-classification",
"scientific english",
"arxiv:1903.10676",
"transformers"
]
| token-classification | [
".gitattributes",
"README.md",
"config.json",
"flax_model.msgpack",
"pytorch_model.bin",
"special_tokens_map.json",
"tokenizer_config.json",
"vocab.txt"
]
| fran-martinez | 219 | transformers | ---
language: scientific english
---
# SciBERT finetuned on JNLPA for NER downstream task
## Language Model
[SciBERT](https://arxiv.org/pdf/1903.10676.pdf) is a pretrained language model based on BERT and trained by the
[Allen Institute for AI](https://allenai.org/) on papers from the corpus of
[Semantic Scholar](https://www.semanticscholar.org/).
Corpus size is 1.14M papers, 3.1B tokens. SciBERT has its own vocabulary (scivocab) that's built to best match
the training corpus.
## Downstream task
[`allenai/scibert_scivocab_cased`](https://huggingface.co/allenai/scibert_scivocab_cased#) has been finetuned for Named Entity
Recognition (NER) downstream task. The code used to train the NER model can be found [here](https://github.com/fran-martinez/bio_ner_bert).
### Data
The corpus used to fine-tune the NER is [BioNLP / JNLPBA shared task](http://www.geniaproject.org/shared-tasks/bionlp-jnlpba-shared-task-2004).
- The training data consists of 2,000 PubMed abstracts with term/word annotations. This corresponds to 18,546 samples (sentences).
- The evaluation data consists of 404 PubMed abstracts with term/word annotations. This corresponds to 3,856 samples (sentences).
The classes (at word level) and its distribution (number of examples for each class) for training and evaluation datasets are shown below:
| Class Label | # training examples| # evaluation examples|
|:--------------|--------------:|----------------:|
|O | 382,963 | 81,647 |
|B-protein | 30,269 | 5,067 |
|I-protein | 24,848 | 4,774 |
|B-cell_type | 6,718 | 1,921 |
|I-cell_type | 8,748 | 2,991 |
|B-DNA | 9,533 | 1,056 |
|I-DNA | 15,774 | 1,789 |
|B-cell_line | 3,830 | 500 |
|I-cell_line | 7,387 | 989 |
|B-RNA | 951 | 118 |
|I-RNA | 1,530 | 187 |
### Model
An exhaustive hyperparameter search was done.
The hyperparameters that provided the best results are:
- Max length sequence: 128
- Number of epochs: 6
- Batch size: 32
- Dropout: 0.3
- Optimizer: Adam
The learning rate was 5e-5 with a linearly decreasing schedule. A warmup was applied at the beginning of training,
with warmup steps equal to a ratio of 0.1 of the total training steps.
The model from the epoch with the best F1-score was selected, in this case, the model from epoch 5.
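A sketch of how such a warmup + linear-decay schedule is typically configured with `transformers` (the helper shown is standard; `model` and `train_dataloader` are placeholders from your own training setup):
````python
from torch.optim import Adam
from transformers import get_linear_schedule_with_warmup

# `model` and `train_dataloader` come from your own setup (placeholders here).
num_epochs = 6
num_training_steps = num_epochs * len(train_dataloader)

optimizer = Adam(model.parameters(), lr=5e-5)
scheduler = get_linear_schedule_with_warmup(
    optimizer,
    num_warmup_steps=int(0.1 * num_training_steps),  # warmup ratio of 0.1
    num_training_steps=num_training_steps,
)
````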
### Evaluation
The following table shows the evaluation metrics calculated at span/entity level:
| | precision| recall| f1-score|
|:---------|-----------:|---------:|---------:|
cell_line | 0.5205 | 0.7100 | 0.6007 |
cell_type | 0.7736 | 0.7422 | 0.7576 |
protein | 0.6953 | 0.8459 | 0.7633 |
DNA | 0.6997 | 0.7894 | 0.7419 |
RNA | 0.6985 | 0.8051 | 0.7480 |
| | | |
**micro avg** | 0.6984 | 0.8076 | 0.7490|
**macro avg** | 0.7032 | 0.8076 | 0.7498 |
The macro F1-score is equal to 0.7498, compared to the value provided by the Allen Institute for AI in their
[paper](https://arxiv.org/pdf/1903.10676.pdf), which is equal to 0.7728. This drop in performance could be due to
several reasons, but one hypothesis could be the fact that the authors used an additional conditional random field,
while this model uses a regular classification layer with softmax activation on top of the SciBERT model.
At word level, this model achieves a precision of 0.7742, a recall of 0.8536 and a F1-score of 0.8093.
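Span-level metrics of this kind are commonly computed with the `seqeval` library; a small sketch under that assumption (the card does not state which tool was actually used):
````python
from seqeval.metrics import classification_report

# Toy example: gold vs. predicted tag sequences for two sentences.
y_true = [["B-protein", "I-protein", "O", "B-cell_type"], ["O", "B-DNA", "I-DNA"]]
y_pred = [["B-protein", "I-protein", "O", "O"], ["O", "B-DNA", "I-DNA"]]
print(classification_report(y_true, y_pred))
````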
### Model usage in inference
Use the pipeline:
````python
from transformers import pipeline
text = "Mouse thymus was used as a source of glucocorticoid receptor from normal CS lymphocytes."
nlp_ner = pipeline("ner",
model='fran-martinez/scibert_scivocab_cased_ner_jnlpba',
tokenizer='fran-martinez/scibert_scivocab_cased_ner_jnlpba')
nlp_ner(text)
"""
Output:
---------------------------
[
{'word': 'glucocorticoid',
'score': 0.9894881248474121,
'entity': 'B-protein'},
{'word': 'receptor',
'score': 0.989505410194397,
'entity': 'I-protein'},
{'word': 'normal',
'score': 0.7680378556251526,
'entity': 'B-cell_type'},
{'word': 'cs',
'score': 0.5176806449890137,
'entity': 'I-cell_type'},
{'word': 'lymphocytes',
'score': 0.9898491501808167,
'entity': 'I-cell_type'}
]
"""
````
Or load model and tokenizer as follows:
````python
import torch
from transformers import AutoTokenizer, AutoModelForTokenClassification
# Example
text = "Mouse thymus was used as a source of glucocorticoid receptor from normal CS lymphocytes."
# Load model
tokenizer = AutoTokenizer.from_pretrained("fran-martinez/scibert_scivocab_cased_ner_jnlpba")
model = AutoModelForTokenClassification.from_pretrained("fran-martinez/scibert_scivocab_cased_ner_jnlpba")
# Get input for BERT
input_ids = torch.tensor(tokenizer.encode(text)).unsqueeze(0)
# Predict
with torch.no_grad():
outputs = model(input_ids)
# From the output let's take the first element of the tuple.
# Then, let's get rid of [CLS] and [SEP] tokens (first and last)
predictions = outputs[0].argmax(axis=-1)[0][1:-1]
# Map label class indexes to string labels.
for token, pred in zip(tokenizer.tokenize(text), predictions):
print(token, '->', model.config.id2label[pred.numpy().item()])
"""
Output:
---------------------------
mouse -> O
thymus -> O
was -> O
used -> O
as -> O
a -> O
source -> O
of -> O
glucocorticoid -> B-protein
receptor -> I-protein
from -> O
normal -> B-cell_type
cs -> I-cell_type
lymphocytes -> I-cell_type
. -> O
"""
````
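To turn the per-token labels above into entity spans, consecutive B-/I- tags can be grouped; a minimal illustration (not part of the original card):
````python
def group_entities(tokens, labels):
    """Group consecutive B-/I- labels into (entity_type, text) spans."""
    spans, current_type, current_tokens = [], None, []

    def close():
        nonlocal current_type, current_tokens
        if current_type is not None:
            spans.append((current_type, " ".join(current_tokens)))
        current_type, current_tokens = None, []

    for token, label in zip(tokens, labels):
        if label == "O":
            close()
        elif label.startswith("I-") and label[2:] == current_type:
            current_tokens.append(token)
        else:  # a "B-..." tag, or an "I-..." tag that does not continue the open span
            close()
            current_type, current_tokens = label[2:], [token]
    close()
    return spans

tokens = ["glucocorticoid", "receptor", "from", "normal", "cs", "lymphocytes"]
labels = ["B-protein", "I-protein", "O", "B-cell_type", "I-cell_type", "I-cell_type"]
print(group_entities(tokens, labels))
# [('protein', 'glucocorticoid receptor'), ('cell_type', 'normal cs lymphocytes')]
````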
|
franccesco/test_model | 2021-03-14T22:41:52.000Z | []
| [
".gitattributes"
]
| franccesco | 0 | |||
frankdarkluo/bert-base-chinese | 2021-05-05T17:01:34.000Z | []
| [
".gitattributes"
]
| frankdarkluo | 0 | |||
fspanda/Electra-Medical-v1.5-discriminator | 2020-11-04T15:00:32.000Z | [
"pytorch",
"electra",
"pretraining",
"transformers"
]
| [
".gitattributes",
"config.json",
"pytorch_model.bin",
"tokenizer_config.json",
"vocab.txt"
]
| fspanda | 17 | transformers | ||
fspanda/Electra-Medical-v1.5-generator | 2020-11-04T14:58:52.000Z | [
"pytorch",
"electra",
"masked-lm",
"transformers",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"config.json",
"pytorch_model.bin",
"tokenizer_config.json",
"vocab.txt"
]
| fspanda | 22 | transformers | |
fspanda/Electra-Medical-v790000-discriminator | 2020-10-31T13:22:33.000Z | [
"pytorch",
"electra",
"pretraining",
"transformers"
]
| [
".gitattributes",
"config.json",
"pytorch_model.bin",
"tokenizer_config.json",
"vocab.txt"
]
| fspanda | 12 | transformers | ||
fspanda/Electra-Medical-v790000-generator | 2020-10-31T13:24:12.000Z | [
"pytorch",
"electra",
"masked-lm",
"transformers",
"fill-mask"
]
| fill-mask | [
".gitattributes",
"config.json",
"pytorch_model.bin",
"tokenizer_config.json",
"vocab.txt"
]
| fspanda | 23 | transformers |