modelId: string (length 4 to 81)
tags: sequence
pipeline_tag: string (17 classes)
config: dict
downloads: int64 (0 to 59.7M)
first_commit: timestamp[ns, tz=UTC]
card: string (length 51 to 438k)
CALM/backup
[ "lean_albert", "transformers" ]
null
{ "architectures": [ "LeanAlbertForPretraining", "LeanAlbertForTokenClassification", "LeanAlbertForSequenceClassification" ], "model_type": "lean_albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
Moved here: https://huggingface.co/google/bigbird-pegasus-large-pubmed
CAMeL-Lab/bert-base-arabic-camelbert-ca-ner
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
85
null
Moved here: https://huggingface.co/google/bigbird-roberta-base
CAMeL-Lab/bert-base-arabic-camelbert-ca-poetry
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:1905.05700", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
42
null
Moved here: https://huggingface.co/google/bigbird-roberta-large
CAMeL-Lab/bert-base-arabic-camelbert-ca-pos-egy
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
16,451
null
---
language: en
license: apache-2.0
datasets: natural_questions
widget:
- text: "Who added BigBird to HuggingFace Transformers?"
  context: "BigBird Pegasus just landed! Thanks to Vasudev Gupta, BigBird Pegasus from Google AI is merged into HuggingFace Transformers. Check it out today!!!"
---

This checkpoint is obtained after training `BigBirdForQuestionAnswering` (with an extra pooler head) on the [`natural_questions`](https://huggingface.co/datasets/natural_questions) dataset for ~2 weeks on 2 K80 GPUs.

Script for training can be found here: https://github.com/vasudevgupta7/bigbird

| Exact Match | 47.44 |
|-------------|-------|

**Use this model just like any other model from 🤗Transformers**

```python
from transformers import BigBirdForQuestionAnswering, BigBirdTokenizer

model_id = "vasudevgupta/bigbird-roberta-natural-questions"
model = BigBirdForQuestionAnswering.from_pretrained(model_id)
tokenizer = BigBirdTokenizer.from_pretrained(model_id)
```

In case you are interested in predicting the category (null, long, short, yes, no) as well, use `BigBirdForNaturalQuestions` (instead of `BigBirdForQuestionAnswering`) from my training script.
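The start/end logits returned by the model can be decoded into an answer span. Below is a minimal sketch using the widget question and context from the card above; it relies on the `model` and `tokenizer` objects from the previous snippet and uses plain argmax span selection rather than the full Natural Questions decoding logic.

```python
import torch

question = "Who added BigBird to HuggingFace Transformers?"
context = (
    "BigBird Pegasus just landed! Thanks to Vasudev Gupta, BigBird Pegasus "
    "from Google AI is merged into HuggingFace Transformers. Check it out today!!!"
)

# Encode the (question, context) pair and pick the most likely answer span
inputs = tokenizer(question, context, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
start = int(outputs.start_logits.argmax(-1))
end = int(outputs.end_logits.argmax(-1))
print(tokenizer.decode(inputs["input_ids"][0, start : end + 1]))
```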
CAMeL-Lab/bert-base-arabic-camelbert-ca-pos-msa
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
71
null
DL research papers **Title -> abstract**

**Using this model**

```python
from transformers import pipeline, GPT2LMHeadModel, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("vasudevgupta/dl-hack-distilgpt2")
model = GPT2LMHeadModel.from_pretrained("vasudevgupta/dl-hack-distilgpt2")
agent = pipeline("text-generation", model=model, tokenizer=tokenizer)

print(agent("An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale", max_length=200))
```
CAMeL-Lab/bert-base-arabic-camelbert-ca-sentiment
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
73
null
DL research papers **Title -> abstract**

**Using this model**

```python
from transformers import pipeline, GPT2LMHeadModel, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("vasudevgupta/dl-hack-gpt2-large")
model = GPT2LMHeadModel.from_pretrained("vasudevgupta/dl-hack-gpt2-large")
agent = pipeline("text-generation", model=model, tokenizer=tokenizer)

print(agent("An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale", max_length=200))
```
CAMeL-Lab/bert-base-arabic-camelbert-ca
[ "pytorch", "tf", "jax", "bert", "fill-mask", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
580
null
Deep Learning research papers **Title -> abstract**
CAMeL-Lab/bert-base-arabic-camelbert-da-ner
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
42
null
# finetuned-wav2vec2-960h

This model was trained as a part of my **GSoC'21 (Google Summer of Code)** project. It is fine-tuned on 960h of the **LibriSpeech dataset** (`train-clean-100`, `train-clean-360`, `train-other-500`) and evaluated on the `test-clean` data.

| WER (word error rate) | 5.67 |
|-----------------------|------|

You can find the training code here: https://github.com/vasudevgupta7/gsoc-wav2vec2.
CAMeL-Lab/bert-base-arabic-camelbert-da-poetry
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:1905.05700", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
37
null
---
language: en
license: apache-2.0
datasets: natural_questions
widget:
- text: "Who added BigBird to HuggingFace Transformers?"
  context: "BigBird Pegasus just landed! Thanks to Vasudev Gupta, BigBird Pegasus from Google AI is merged into HuggingFace Transformers. Check it out today!!!"
---

This checkpoint is obtained after training `FlaxBigBirdForQuestionAnswering` (with an extra pooler head) on the [`natural_questions`](https://huggingface.co/datasets/natural_questions) dataset on a TPU v3-8. The dataset takes around 100 GB on disk, but thanks to Cloud TPUs and JAX, each epoch took just 4.5 hours.

Script for training can be found here: https://github.com/vasudevgupta7/bigbird

**Use this model just like any other model from 🤗Transformers**

```python
from transformers import FlaxBigBirdForQuestionAnswering, BigBirdTokenizerFast

model_id = "vasudevgupta/flax-bigbird-natural-questions"
model = FlaxBigBirdForQuestionAnswering.from_pretrained(model_id)
tokenizer = BigBirdTokenizerFast.from_pretrained(model_id)
```

In case you are interested in predicting the category (null, long, short, yes, no) as well, use `FlaxBigBirdForNaturalQuestions` (instead of `FlaxBigBirdForQuestionAnswering`) from my training script.

| Exact Match | 55.12 |
|-------------|-------|

Evaluation script: https://colab.research.google.com/github/vasudevgupta7/bigbird/blob/main/notebooks/evaluate-flax-natural-questions.ipynb
CAMeL-Lab/bert-base-arabic-camelbert-da-pos-egy
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
32
null
TensorFlow version of [facebook/wav2vec2-base-960h](https://huggingface.co/facebook/wav2vec2-base-960h). Obtained using script from https://github.com/vasudevgupta7/gsoc-wav2vec2.
CAMeL-Lab/bert-base-arabic-camelbert-da-pos-glf
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
54
null
TensorFlow equivalent of [facebook/wav2vec2-large-robust](https://huggingface.co/facebook/wav2vec2-large-robust)
CAMeL-Lab/bert-base-arabic-camelbert-da-pos-msa
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
TensorFlow equivalent of [`facebook/wav2vec2-large-xlsr-53`](https://huggingface.co/facebook/wav2vec2-large-xlsr-53)
CAMeL-Lab/bert-base-arabic-camelbert-da-sentiment
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "has_space" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
19,850
null
Wav2Vec2 Model (initialized from [`facebook/wav2vec2-base`](https://huggingface.co/facebook/wav2vec2-base)) with **no** LM head.

Model weights are converted into TensorFlow using the following script:

```shell
python3 convert_torch_to_tf.py --hf_model_id "facebook/wav2vec2-base"
```

**TF SavedModel** is obtained by running the following commands:

```shell
git clone https://huggingface.co/vasudevgupta/gsoc-wav2vec2
python3 export2hub.py \
  --hf_model_id facebook/wav2vec2-base \
  --saved_model_dir gsoc-wav2vec2/saved-model \
  --seqlen 246000
cd gsoc-wav2vec2 && tar -czf saved-model.tar.gz saved-model
```

Project Link: https://github.com/vasudevgupta7/gsoc-wav2vec2
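Once exported, the SavedModel can be loaded back and run in TensorFlow. The sketch below is not part of the original card: it assumes the exported module is directly callable on a batch of raw waveforms whose length matches the `--seqlen 246000` flag above, which may differ from the actual export signature.

```python
import tensorflow as tf

# Path matches --saved_model_dir from the export command above
model = tf.saved_model.load("gsoc-wav2vec2/saved-model")

# Dummy batch of raw audio, padded/truncated to the export-time sequence length
speech = tf.random.normal([1, 246000])
outputs = model(speech)  # assumption: the saved module is callable on raw waveforms
print(outputs)
```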
CAMeL-Lab/bert-base-arabic-camelbert-da
[ "pytorch", "tf", "jax", "bert", "fill-mask", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
449
null
---
datasets: pib
widget:
- text: "હેય! હું વાસુદેવ ગુપ્તા છું"
---

mBART (a pre-trained model by Facebook) is pre-trained to de-noise multiple languages simultaneously with the BART objective.

The checkpoint available in this repository is obtained after fine-tuning `facebook/mbart-large-cc25` on all samples (~60K) from the Bhasha (pib_v1.3) Gujarati-English parallel corpus. This checkpoint gives decent results for Gujarati-English translation.
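A minimal usage sketch for a fine-tuned mBART checkpoint like this one, using the widget sentence above. The model id below is a placeholder for this repository's id, and the `gu_IN`/`en_XX` language codes follow the standard mbart-large-cc25 convention; both are assumptions rather than details stated in the card.

```python
from transformers import MBartForConditionalGeneration, MBartTokenizer

model_id = "<this-repo-id>"  # placeholder: replace with the actual repository id
tokenizer = MBartTokenizer.from_pretrained(model_id, src_lang="gu_IN", tgt_lang="en_XX")
model = MBartForConditionalGeneration.from_pretrained(model_id)

inputs = tokenizer("હેય! હું વાસુદેવ ગુપ્તા છું", return_tensors="pt")
generated = model.generate(**inputs, forced_bos_token_id=tokenizer.lang_code_to_id["en_XX"])
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```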
CAMeL-Lab/bert-base-arabic-camelbert-mix-did-madar-corpus26
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
45
null
---
datasets: pib
widget:
- text: "नमस्ते! मैं वासुदेव गुप्ता हूं"
---

mBART (a pre-trained model by Facebook) is pre-trained to de-noise multiple languages simultaneously with the BART objective.

The checkpoint available in this repository is obtained after fine-tuning `facebook/mbart-large-cc25` on all samples (~260K) from the Bhasha (pib_v1.3) Hindi-English parallel corpus. This checkpoint gives decent results for Hindi-English translation.
CAMeL-Lab/bert-base-arabic-camelbert-mix-did-madar-corpus6
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
34
null
---
datasets: pib
widget:
- text: "नमस्ते! मैं वासुदेव गुप्ता हूं"
---

mBART (a pre-trained model by Facebook) is pre-trained to de-noise multiple languages simultaneously with the BART objective.

The checkpoint available in this repository is obtained after fine-tuning `facebook/mbart-large-cc25` on 0.5M samples from the IIT-B Hindi-English parallel corpus. This checkpoint gives decent results for Hindi-English translation.
CAMeL-Lab/bert-base-arabic-camelbert-mix-did-nadi
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
63
null
This model was trained as a part of the **InterIIT'21 competition**, on the dataset provided by Bridgei2i. It can do multilingual (Hindi, English, Hinglish) summarization (many -> one) and is capable of generating summaries in English regardless of the input language.

| Rouge-L               | Sacrebleu | Headline Similarity (using sentence-transformers) |
|-----------------------|-----------|---------------------------------------------------|
| p=0.46 r=0.49 f1=0.52 | 23.46     | 0.75                                              |

mBART is initialized from **facebook/mbart-large-cc25** and is trained as per the strategy mentioned in our [GitHub](https://github.com/vasudevgupta7/Bridgei2i-Winning-Solutions) repository.
CAMeL-Lab/bert-base-arabic-camelbert-mix-ner
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,860
null
**Project GitHub:** https://github.com/vasudevgupta7/transformers-adapters

**Notes**

* base model can be downloaded from `facebook/mbart-large-cc25`
* `adapters-hin-eng.pt`: adapters hin-eng
* `adapters-guj-eng.pt`: adapters guj-eng
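The adapter weights are shipped as PyTorch `.pt` files; a minimal sketch of fetching and inspecting one is below. The repository id is a placeholder, and how the weights plug into the base mBART model is defined by the project code linked above, so this only covers the download/load step.

```python
import torch
from huggingface_hub import hf_hub_download

# Placeholder repo id: replace with this repository's actual id
path = hf_hub_download(repo_id="<this-repo-id>", filename="adapters-hin-eng.pt")
adapter_weights = torch.load(path, map_location="cpu")

# Inspect which adapter parameters are shipped (assumes the file is a state dict)
print(list(adapter_weights)[:5])
```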
CAMeL-Lab/bert-base-arabic-camelbert-mix-poetry
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:1905.05700", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
31
null
TensorFlow version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base). Obtained using script from https://github.com/vasudevgupta7/gsoc-wav2vec2.
CAMeL-Lab/bert-base-arabic-camelbert-mix-pos-egy
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
62
null
---
language: en
datasets:
- vblagoje/lfqa
- vblagoje/lfqa_support_docs
license: mit
---

## Introduction

See the [blog post](https://towardsdatascience.com/long-form-qa-beyond-eli5-an-updated-dataset-and-approach-319cb841aabb) for more details.

## Usage

```python
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_name = "vblagoje/bart_lfqa"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
model = model.to(device)

# it all starts with a question/query
query = "Why does water heated to room temperature feel colder than the air around it?"

# given the question above, suppose these documents below were found in some document store
documents = ["when the skin is completely wet. The body continuously loses water by...",
             "at greater pressures. There is an ambiguity, however, as to the meaning of the terms 'heating' and 'cooling'...",
             "are not in a relation of thermal equilibrium, heat will flow from the hotter to the colder, by whatever pathway...",
             "air condition and moving along a line of constant enthalpy toward a state of higher humidity. A simple example ...",
             "Thermal contact conductance In physics, thermal contact conductance is the study of heat conduction between solid ..."]

# concatenate question and support documents into BART input
conditioned_doc = "<P> " + " <P> ".join([d for d in documents])
query_and_docs = "question: {} context: {}".format(query, conditioned_doc)

model_input = tokenizer(query_and_docs, truncation=True, padding=True, return_tensors="pt")

generated_answers_encoded = model.generate(input_ids=model_input["input_ids"].to(device),
                                           attention_mask=model_input["attention_mask"].to(device),
                                           min_length=64,
                                           max_length=256,
                                           do_sample=False,
                                           early_stopping=True,
                                           num_beams=8,
                                           temperature=1.0,
                                           top_k=None,
                                           top_p=None,
                                           eos_token_id=tokenizer.eos_token_id,
                                           no_repeat_ngram_size=3,
                                           num_return_sequences=1)
tokenizer.batch_decode(generated_answers_encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)

# below is the abstractive answer generated by the model
["When you heat water to room temperature, it loses heat to the air around it. When you cool it down, it gains heat back from the air, which is why it feels colder than the air surrounding it. It's the same reason why you feel cold when you turn on a fan. The air around you is losing heat, and the water is gaining heat."]
```

## Author

- Vladimir Blagojevic: `dovlex [at] gmail.com` [Twitter](https://twitter.com/vladblagoje) | [LinkedIn](https://www.linkedin.com/in/blagojevicvladimir/)
CAMeL-Lab/bert-base-arabic-camelbert-mix-sentiment
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
855
null
---
language: en
datasets:
- vblagoje/lfqa
license: mit
---

## Introduction

The context/passage encoder model is based on the [DPRContextEncoder](https://huggingface.co/docs/transformers/master/en/model_doc/dpr#transformers.DPRContextEncoder) architecture. It uses the transformer's pooler outputs as context/passage representations.

## Training

We trained vblagoje/dpr-ctx_encoder-single-lfqa-base using FAIR's dpr-scale, starting with a PAQ-based pretrained checkpoint and fine-tuning the retriever on the question-answer pairs from the LFQA dataset. As dpr-scale requires DPR-formatted training set input with positive, negative, and hard negative samples, we created a training file with the answer as the positive, negatives being answers unrelated to the question, while hard negative samples were chosen from answers to questions with a cosine similarity between 0.55 and 0.65.

## Performance

The LFQA DPR-based retriever (vblagoje/dpr-question_encoder-single-lfqa-base and vblagoje/dpr-ctx_encoder-single-lfqa-base) had a score of 6.69 for R-precision and 14.5 for Recall@5 on the KILT benchmark.

## Usage

```python
from transformers import DPRContextEncoder, DPRContextEncoderTokenizer

tokenizer = DPRContextEncoderTokenizer.from_pretrained("vblagoje/dpr-ctx_encoder-single-lfqa-base")
model = DPRContextEncoder.from_pretrained("vblagoje/dpr-ctx_encoder-single-lfqa-base")
input_ids = tokenizer("Why do airplanes leave contrails in the sky?", return_tensors="pt")["input_ids"]
embeddings = model(input_ids).pooler_output
```

## Author

- Vladimir Blagojevic: `dovlex [at] gmail.com` [Twitter](https://twitter.com/vladblagoje) | [LinkedIn](https://www.linkedin.com/in/blagojevicvladimir/)
CAMeL-Lab/bert-base-arabic-camelbert-mix
[ "pytorch", "tf", "jax", "bert", "fill-mask", "ar", "arxiv:2103.06678", "transformers", "Arabic", "Dialect", "Egyptian", "Gulf", "Levantine", "Classical Arabic", "MSA", "Modern Standard Arabic", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
20,880
null
---
language: en
datasets:
- vblagoje/lfqa
license: mit
---

## Introduction

The context/passage encoder model is based on the [DPRContextEncoder](https://huggingface.co/docs/transformers/master/en/model_doc/dpr#transformers.DPRContextEncoder) architecture. It uses the transformer's pooler outputs as context/passage representations. See the [blog post](https://towardsdatascience.com/long-form-qa-beyond-eli5-an-updated-dataset-and-approach-319cb841aabb) for more details.

## Training

We trained vblagoje/dpr-ctx_encoder-single-lfqa-wiki using FAIR's dpr-scale in two stages. In the first stage, we used a PAQ-based pretrained checkpoint and fine-tuned the retriever on the question-answer pairs from the LFQA dataset. As dpr-scale requires DPR-formatted training set input with positive, negative, and hard negative samples, we created a training file with the answer as the positive, negatives being answers unrelated to the question, while hard negative samples were chosen from answers to questions with a cosine similarity between 0.55 and 0.65.

In the second stage, we created a new DPR training set using positives, negatives, and hard negatives from the Wikipedia/Faiss index created in the first stage instead of LFQA dataset answers. More precisely, for each dataset question, we queried the first-stage Wikipedia Faiss index and subsequently used an SBert cross-encoder to score question/answer (passage) pairs with topk=50. The cross-encoder selected the positive passage with the highest score, while the bottom seven answers were selected as hard negatives. Negative samples were again chosen to be answers unrelated to a given dataset question. After creating a DPR-formatted training file with Wikipedia-sourced positive, negative, and hard negative passages, we trained DPR-based question/passage encoders using dpr-scale.

## Performance

The LFQA DPR-based retriever (vblagoje/dpr-question_encoder-single-lfqa-wiki and vblagoje/dpr-ctx_encoder-single-lfqa-wiki) slightly underperforms the 'state-of-the-art' REALM-based retriever of Krishna et al., "Hurdles to Progress in Long-form Question Answering", with KILT benchmark performance of 11.2 for R-precision and 19.5 for Recall@5.

## Usage

```python
from transformers import DPRContextEncoder, DPRContextEncoderTokenizer

tokenizer = DPRContextEncoderTokenizer.from_pretrained("vblagoje/dpr-ctx_encoder-single-lfqa-wiki")
model = DPRContextEncoder.from_pretrained("vblagoje/dpr-ctx_encoder-single-lfqa-wiki")
input_ids = tokenizer("Where an aircraft passes through a cloud, it can disperse the cloud in its path...", return_tensors="pt")["input_ids"]
embeddings = model(input_ids).pooler_output
```

## Author

- Vladimir Blagojevic: `dovlex [at] gmail.com` [Twitter](https://twitter.com/vladblagoje) | [LinkedIn](https://www.linkedin.com/in/blagojevicvladimir/)
CAMeL-Lab/bert-base-arabic-camelbert-msa-did-madar-twitter5
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
75
null
---
language: en
datasets:
- vblagoje/lfqa
license: mit
---

## Introduction

The question encoder model is based on the [DPRQuestionEncoder](https://huggingface.co/docs/transformers/master/en/model_doc/dpr#transformers.DPRQuestionEncoder) architecture. It uses the transformer's pooler outputs as question representations.

## Training

We trained vblagoje/dpr-question_encoder-single-lfqa-base using FAIR's dpr-scale, starting with a PAQ-based pretrained checkpoint and fine-tuning the retriever on the question-answer pairs from the LFQA dataset. As dpr-scale requires DPR-formatted training set input with positive, negative, and hard negative samples, we created a training file with the answer as the positive, negatives being answers unrelated to the question, while hard negative samples were chosen from answers to questions with a cosine similarity between 0.55 and 0.65.

## Performance

The LFQA DPR-based retriever (vblagoje/dpr-question_encoder-single-lfqa-base and vblagoje/dpr-ctx_encoder-single-lfqa-base) had a score of 6.69 for R-precision and 14.5 for Recall@5 on the KILT benchmark.

## Usage

```python
from transformers import DPRQuestionEncoder, AutoTokenizer

model = DPRQuestionEncoder.from_pretrained("vblagoje/dpr-question_encoder-single-lfqa-base")
tokenizer = AutoTokenizer.from_pretrained("vblagoje/dpr-question_encoder-single-lfqa-base")
input_ids = tokenizer("Why do airplanes leave contrails in the sky?", return_tensors="pt")["input_ids"]
embeddings = model(input_ids).pooler_output
```

## Author

- Vladimir Blagojevic: `dovlex [at] gmail.com` [Twitter](https://twitter.com/vladblagoje) | [LinkedIn](https://www.linkedin.com/in/blagojevicvladimir/)
CAMeL-Lab/bert-base-arabic-camelbert-msa-did-nadi
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
71
null
---
language: en
datasets:
- vblagoje/lfqa
license: mit
---

## Introduction

The question encoder model is based on the [DPRQuestionEncoder](https://huggingface.co/docs/transformers/master/en/model_doc/dpr#transformers.DPRQuestionEncoder) architecture. It uses the transformer's pooler outputs as question representations. See the [blog post](https://towardsdatascience.com/long-form-qa-beyond-eli5-an-updated-dataset-and-approach-319cb841aabb) for more details.

## Training

We trained vblagoje/dpr-question_encoder-single-lfqa-wiki using FAIR's dpr-scale in two stages. In the first stage, we used a PAQ-based pretrained checkpoint and fine-tuned the retriever on the question-answer pairs from the LFQA dataset. As dpr-scale requires DPR-formatted training set input with positive, negative, and hard negative samples, we created a training file with the answer as the positive, negatives being answers unrelated to the question, while hard negative samples were chosen from answers to questions with a cosine similarity between 0.55 and 0.65.

In the second stage, we created a new DPR training set using positives, negatives, and hard negatives from the Wikipedia/Faiss index created in the first stage instead of LFQA dataset answers. More precisely, for each dataset question, we queried the first-stage Wikipedia Faiss index and subsequently used an SBert cross-encoder to score question/answer (passage) pairs with topk=50. The cross-encoder selected the positive passage with the highest score, while the bottom seven answers were selected as hard negatives. Negative samples were again chosen to be answers unrelated to a given dataset question. After creating a DPR-formatted training file with Wikipedia-sourced positive, negative, and hard negative passages, we trained DPR-based question/passage encoders using dpr-scale.

## Performance

The LFQA DPR-based retriever (vblagoje/dpr-question_encoder-single-lfqa-wiki and vblagoje/dpr-ctx_encoder-single-lfqa-wiki) slightly underperforms the 'state-of-the-art' REALM-based retriever of Krishna et al., "Hurdles to Progress in Long-form Question Answering", with KILT benchmark performance of 11.2 for R-precision and 19.5 for Recall@5.

## Usage

```python
from transformers import DPRQuestionEncoder, AutoTokenizer

model = DPRQuestionEncoder.from_pretrained("vblagoje/dpr-question_encoder-single-lfqa-wiki")
tokenizer = AutoTokenizer.from_pretrained("vblagoje/dpr-question_encoder-single-lfqa-wiki")
input_ids = tokenizer("Why do airplanes leave contrails in the sky?", return_tensors="pt")["input_ids"]
embeddings = model(input_ids).pooler_output
```

## Author

- Vladimir Blagojevic: `dovlex [at] gmail.com` [Twitter](https://twitter.com/vladblagoje) | [LinkedIn](https://www.linkedin.com/in/blagojevicvladimir/)
CAMeL-Lab/bert-base-arabic-camelbert-msa-pos-msa
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
133
null
---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: wav2vec2-base-timit-demo-colab
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# wav2vec2-base-timit-demo-colab

This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4630
- Wer: 0.3399

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 32
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1000
- num_epochs: 30
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Wer    |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 3.4454        | 4.0   | 500  | 1.2920          | 0.9381 |
| 0.5869        | 8.0   | 1000 | 0.4634          | 0.4297 |
| 0.2216        | 12.0  | 1500 | 0.4481          | 0.3778 |
| 0.1283        | 16.0  | 2000 | 0.4651          | 0.3741 |
| 0.0872        | 20.0  | 2500 | 0.4762          | 0.3548 |
| 0.0635        | 24.0  | 3000 | 0.4495          | 0.3513 |
| 0.0482        | 28.0  | 3500 | 0.4630          | 0.3399 |

### Framework versions

- Transformers 4.11.3
- Pytorch 1.10.0+cu111
- Datasets 1.13.3
- Tokenizers 0.10.3
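For reference, the training hyperparameters listed above map onto 🤗 `TrainingArguments` roughly as follows; this is a sketch reconstructed from the card, not the author's exact configuration.

```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="wav2vec2-base-timit-demo-colab",
    learning_rate=1e-4,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=8,
    seed=42,
    lr_scheduler_type="linear",
    warmup_steps=1000,
    num_train_epochs=30,
    fp16=True,  # mixed_precision_training: Native AMP
)
```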
CAMeL-Lab/bert-base-arabic-camelbert-msa-sixteenth
[ "pytorch", "tf", "jax", "bert", "fill-mask", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
26
null
---
tags:
- image-classification
- pytorch
- huggingpics
metrics:
- accuracy
model-index:
- name: hugging-doge
  results:
  - task:
      name: Image Classification
      type: image-classification
    metrics:
    - name: Accuracy
      type: accuracy
      value: 0.9375
---

# hugging-doge

Autogenerated by HuggingPics🤗🖼️

Create your own image classifier for **anything** by running [the demo on Google Colab](https://colab.research.google.com/github/nateraw/huggingpics/blob/main/HuggingPics.ipynb).

Report any issues with the demo at the [github repo](https://github.com/nateraw/huggingpics).

## Example Images

#### corgi
![corgi](images/corgi.jpg)

#### golden retriever
![golden retriever](images/golden_retriever.jpg)

#### husky
![husky](images/husky.jpg)

#### poodle
![poodle](images/poodle.jpg)

#### shiba inu
![shiba inu](images/shiba_inu.jpg)
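A classifier like this can be queried with the 🤗 image-classification pipeline. The model id below is a placeholder for this repository's id (the card does not state it), and the image path refers to one of the example images listed above.

```python
from transformers import pipeline

# Placeholder model id: replace with this repository's actual id
classifier = pipeline("image-classification", model="<this-repo-id>")
print(classifier("images/corgi.jpg"))  # e.g. [{"label": "corgi", "score": ...}, ...]
```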
CAUKiel/JavaBERT-uncased
[ "pytorch", "safetensors", "bert", "fill-mask", "java", "code", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
---
language: en
tags:
- grammar
- text2text-generation
license: cc-by-nc-sa-4.0
datasets:
- jfleg
---

# T5 Grammar Correction

This model generates a revised version of inputted text with the goal of containing fewer grammatical errors. It was trained with [Happy Transformer](https://github.com/EricFillion/happy-transformer) using a dataset called [JFLEG](https://arxiv.org/abs/1702.04066). Here's a [full article](https://www.vennify.ai/fine-tune-grammar-correction/) on how to train a similar model.

## Usage

`pip install happytransformer`

```python
from happytransformer import HappyTextToText, TTSettings

happy_tt = HappyTextToText("T5", "vennify/t5-base-grammar-correction")
args = TTSettings(num_beams=5, min_length=1)

# Add the prefix "grammar: " before each input
result = happy_tt.generate_text("grammar: This sentences has has bads grammar.", args=args)
print(result.text)  # This sentence has bad grammar.
```
CLAck/indo-pure
[ "pytorch", "marian", "text2text-generation", "en", "id", "dataset:ALT", "transformers", "translation", "license:apache-2.0", "autotrain_compatible" ]
translation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
### Twitter RoBERTa BR

This is a Portuguese Twitter RoBERTa model trained on ~7M tweets. The results will be posted in the future.

### Example of use

```python
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("verissimomanoel/RobertaTwitterBR")
model = AutoModel.from_pretrained("verissimomanoel/RobertaTwitterBR")
```
CLTL/icf-domains
[ "pytorch", "roberta", "nl", "transformers", "license:mit", "text-classification" ]
text-classification
{ "architectures": [ "RobertaForMultiLabelSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
35
null
# QA-for-Event-Extraction

## Model description

This is a QA model used as part of the event extraction system in the ACL2021 paper: [Zero-shot Event Extraction via Transfer Learning: Challenges and Insights](https://aclanthology.org/2021.acl-short.42/). The pretrained architecture is [roberta-large](https://huggingface.co/roberta-large) and the fine-tuning data is [QAMR](https://github.com/uwnlp/qamr).

## Demo

To see how the model works, type a question and a context into the separate right-hand-side textboxes under "Hosted inference API".

Example:
- Question: `Who was killed?`
- Context: `A car bomb exploded Thursday in a crowded outdoor market in the heart of Jerusalem, killing at least two people, police said.`
- Answer: `people`

## Usage

- To use the QA model independently, follow the [huggingface documentation on AutoModelForQuestionAnswering](https://huggingface.co/transformers/task_summary.html?highlight=automodelforquestionanswering#extractive-question-answering).
- To use it as part of the event extraction system, please check out [our Github repo](https://github.com/veronica320/Zeroshot-Event-Extraction).

### BibTeX entry and citation info

```
@inproceedings{lyu-etal-2021-zero,
    title = "Zero-shot Event Extraction via Transfer Learning: {C}hallenges and Insights",
    author = "Lyu, Qing and Zhang, Hongming and Sulem, Elior and Roth, Dan",
    booktitle = "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 2: Short Papers)",
    month = aug,
    year = "2021",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.acl-short.42",
    doi = "10.18653/v1/2021.acl-short.42",
    pages = "322--332",
    abstract = "Event extraction has long been a challenging task, addressed mostly with supervised methods that require expensive annotation and are not extensible to new event ontologies. In this work, we explore the possibility of zero-shot event extraction by formulating it as a set of Textual Entailment (TE) and/or Question Answering (QA) queries (e.g. {``}A city was attacked{''} entails {``}There is an attack{''}), exploiting pretrained TE/QA models for direct transfer. On ACE-2005 and ERE, our system achieves acceptable results, yet there is still a large gap from supervised approaches, showing that current QA and TE technologies fail in transferring to a different domain. To investigate the reasons behind the gap, we analyze the remaining key challenges, their respective impact, and possible improvement directions.",
}
```
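A minimal sketch of querying the model on the demo example above with the question-answering pipeline. The model id is a placeholder for this repository's id, which the card does not spell out.

```python
from transformers import pipeline

# Placeholder model id: replace with this repository's actual id
qa = pipeline("question-answering", model="<this-repo-id>")
result = qa(
    question="Who was killed?",
    context=(
        "A car bomb exploded Thursday in a crowded outdoor market in the heart "
        "of Jerusalem, killing at least two people, police said."
    ),
)
print(result["answer"])  # expected: "people"
```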
CLTL/icf-levels-adm
[ "pytorch", "roberta", "text-classification", "nl", "transformers", "license:mit" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
33
null
# TE-for-Event-Extraction

## Model description

This is a TE model used as part of the event extraction system in the ACL2021 paper: [Zero-shot Event Extraction via Transfer Learning: Challenges and Insights](https://aclanthology.org/2021.acl-short.42/). The pretrained architecture is [roberta-large](https://huggingface.co/roberta-large) and the fine-tuning data is [MNLI](https://cims.nyu.edu/~sbowman/multinli/).

The label mapping is:
```
LABEL_0: Contradiction
LABEL_1: Neutral
LABEL_2: Entailment
```

## Demo

To see how the model works, type a sentence and a hypothesis separated by "\<\/s\>\<\/s\>" in the right-hand-side textbox under "Hosted inference API".

Example:
- Input:
```
A car bomb exploded Thursday in a crowded outdoor market in the heart of Jerusalem. </s></s> This text is about an attack.
```
- Output:
```
LABEL_2 (Entailment)
```

## Usage

- To use the TE model independently, follow the [huggingface documentation on AutoModelForSequenceClassification](https://huggingface.co/transformers/task_summary.html#sequence-classification).
- To use it as part of the event extraction system, please check out [our Github repo](https://github.com/veronica320/Zeroshot-Event-Extraction).

### BibTeX entry and citation info

```
@inproceedings{lyu-etal-2021-zero,
    title = "Zero-shot Event Extraction via Transfer Learning: {C}hallenges and Insights",
    author = "Lyu, Qing and Zhang, Hongming and Sulem, Elior and Roth, Dan",
    booktitle = "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 2: Short Papers)",
    month = aug,
    year = "2021",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.acl-short.42",
    doi = "10.18653/v1/2021.acl-short.42",
    pages = "322--332",
    abstract = "Event extraction has long been a challenging task, addressed mostly with supervised methods that require expensive annotation and are not extensible to new event ontologies. In this work, we explore the possibility of zero-shot event extraction by formulating it as a set of Textual Entailment (TE) and/or Question Answering (QA) queries (e.g. {``}A city was attacked{''} entails {``}There is an attack{''}), exploiting pretrained TE/QA models for direct transfer. On ACE-2005 and ERE, our system achieves acceptable results, yet there is still a large gap from supervised approaches, showing that current QA and TE technologies fail in transferring to a different domain. To investigate the reasons behind the gap, we analyze the remaining key challenges, their respective impact, and possible improvement directions.",
}
```
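A minimal sketch of scoring the example premise/hypothesis pair with `AutoModelForSequenceClassification`, interpreted with the label mapping above. The model id is a placeholder for this repository's id.

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Placeholder model id: replace with this repository's actual id
model_id = "<this-repo-id>"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

premise = "A car bomb exploded Thursday in a crowded outdoor market in the heart of Jerusalem."
hypothesis = "This text is about an attack."
inputs = tokenizer(premise, hypothesis, return_tensors="pt")
with torch.no_grad():
    probs = model(**inputs).logits.softmax(-1)
# LABEL_0: Contradiction, LABEL_1: Neutral, LABEL_2: Entailment
print(probs)
```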
CLTL/icf-levels-att
[ "pytorch", "roberta", "text-classification", "nl", "transformers", "license:mit" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
32
null
---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- bleu
datasets:
- versae/modernisa
model-index:
- name: byt5-base-finetuned-modernisa
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# byt5-base-finetuned-modernisa

This model is a fine-tuned version of [google/byt5-base](https://huggingface.co/google/byt5-base) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1176
- Bleu: 44.888
- Gen Len: 18.4465

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 4
- eval_batch_size: 4
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step  | Validation Loss | Bleu    | Gen Len |
|:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:|
| 0.1474        | 0.35  | 10000 | 0.1360          | 42.8789 | 18.4441 |
| 0.1328        | 0.71  | 20000 | 0.1303          | 43.5394 | 18.4368 |
| 0.1216        | 1.06  | 30000 | 0.1245          | 44.1557 | 18.4384 |
| 0.1167        | 1.42  | 40000 | 0.1219          | 44.1961 | 18.4449 |
| 0.1065        | 1.77  | 50000 | 0.1192          | 44.7353 | 18.443  |
| 0.099         | 2.13  | 60000 | 0.1195          | 44.522  | 18.4524 |
| 0.088         | 2.48  | 70000 | 0.1192          | 44.8243 | 18.4441 |
| 0.0907        | 2.84  | 80000 | 0.1176          | 44.888  | 18.4465 |

### Framework versions

- Transformers 4.13.0.dev0
- Pytorch 1.10.0+cu111
- Datasets 1.15.2.dev0
- Tokenizers 0.10.3
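For reference, the hyperparameters above correspond roughly to the following 🤗 `Seq2SeqTrainingArguments`; this is reconstructed from the card, and `predict_with_generate` is an assumption (it is needed to report BLEU and generation length during evaluation).

```python
from transformers import Seq2SeqTrainingArguments

training_args = Seq2SeqTrainingArguments(
    output_dir="byt5-base-finetuned-modernisa",
    learning_rate=1e-4,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=3,
    predict_with_generate=True,  # assumption: required for the Bleu / Gen Len metrics
)
```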
CLTL/icf-levels-etn
[ "pytorch", "roberta", "text-classification", "nl", "transformers", "license:mit" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
31
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - bleu datasets: - versae/modernisa model-index: - name: mt5-base-finetuned-modernisa results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mt5-base-finetuned-modernisa This model is a fine-tuned version of [google/mt5-base](https://huggingface.co/google/mt5-base) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.3179 - Bleu: 81.9164 - Gen Len: 11.1876 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:| | 0.4588 | 0.35 | 10000 | 0.4023 | 78.1616 | 11.1577 | | 0.3982 | 0.71 | 20000 | 0.3584 | 79.3456 | 11.144 | | 0.3465 | 1.06 | 30000 | 0.3424 | 80.4057 | 11.1625 | | 0.3236 | 1.42 | 40000 | 0.3349 | 80.9978 | 11.1869 | | 0.2983 | 1.77 | 50000 | 0.3243 | 81.5426 | 11.1925 | | 0.278 | 2.13 | 60000 | 0.3210 | 81.794 | 11.2047 | | 0.2584 | 2.48 | 70000 | 0.3205 | 81.8086 | 11.1986 | | 0.2609 | 2.84 | 80000 | 0.3179 | 81.9164 | 11.1876 | ### Framework versions - Transformers 4.13.0.dev0 - Pytorch 1.10.0+cu111 - Datasets 1.15.2.dev0 - Tokenizers 0.10.3
Caddy/UD
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
# MS Marco Ranking with ColBERT on Vespa.ai

The model is based on [ColBERT: Efficient and Effective Passage Search via Contextualized Late Interaction over BERT](https://arxiv.org/abs/2004.12832). This BERT model is based on [cross-encoder/ms-marco-MiniLM-L-6-v2](https://huggingface.co/cross-encoder/ms-marco-MiniLM-L-6-v2) and trained using the original [ColBERT training routine](https://github.com/stanford-futuredata/ColBERT/).

This model has 22.3M trainable parameters, is approximately 2x faster than [vespa-engine/colbert-medium](https://huggingface.co/vespa-engine/colbert-medium), and achieves better or on-par MRR@10 on dev. The model weights have been tuned by training on a randomized sample of MS Marco training triplets from [MSMARCO-Passage-Ranking](https://github.com/microsoft/MSMARCO-Passage-Ranking).

To use this model with vespa.ai for MS Marco Passage Ranking, see the [MS Marco Ranking using Vespa.ai sample app](https://github.com/vespa-engine/sample-apps/tree/master/msmarco-ranking).

# MS Marco Passage Ranking

| MS Marco Passage Ranking Query Set | MRR@10 ColBERT on Vespa.ai |
|------------------------------------|----------------|
| Dev | 0.364 |

Recall@K on Dev (6980 queries)

| K | Recall@K |
|------------------------------------|----------------|
| 50 | 0.816 |
| 200 | 0.905 |
| 1000 | 0.939 |

The MRR@10 on dev is achieved by re-ranking the top 1K passages retrieved by a dense retriever based on [sentence-transformers/msmarco-MiniLM-L-6-v3](https://huggingface.co/sentence-transformers/msmarco-MiniLM-L-6-v3). Re-ranking the original top-1000 dev candidates gives 0.354 MRR@10 (Recall@1K 0.82). The official BM25 baseline achieves MRR@10 of 0.16 on the eval question set and 0.167 on dev. See the [MS Marco Passage Ranking Leaderboard](https://microsoft.github.io/msmarco/).

## Export ColBERT query encoder to ONNX

We represent the ColBERT query encoder in the Vespa runtime to map the textual query representation to the tensor representation. For this we use Vespa's support for running ONNX models. One can use the following snippet to export the model for serving.

```python
from transformers import BertModel
from transformers import BertPreTrainedModel
from transformers import BertConfig
import torch
import torch.nn as nn

class VespaColBERT(BertPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.bert = BertModel(config)
        self.linear = nn.Linear(config.hidden_size, 32, bias=False)
        self.init_weights()

    def forward(self, input_ids, attention_mask):
        Q = self.bert(input_ids, attention_mask=attention_mask)[0]
        Q = self.linear(Q)
        return torch.nn.functional.normalize(Q, p=2, dim=2)

colbert_query_encoder = VespaColBERT.from_pretrained("vespa-engine/col-minilm")

# Export model to ONNX for serving in Vespa
input_names = ["input_ids", "attention_mask"]
output_names = ["contextual"]
# Input: at most 32 query terms
input_ids = torch.ones(1, 32, dtype=torch.int64)
attention_mask = torch.ones(1, 32, dtype=torch.int64)
args = (input_ids, attention_mask)
torch.onnx.export(
    colbert_query_encoder,
    args=args,
    f="query_encoder_colbert.onnx",
    input_names=input_names,
    output_names=output_names,
    dynamic_axes={
        "input_ids": {0: "batch"},
        "attention_mask": {0: "batch"},
        "contextual": {0: "batch"},
    },
    opset_version=11,
)
```

# Representing the model on Vespa.ai

See [Ranking with ONNX models](https://docs.vespa.ai/documentation/onnx.html) and the [MS Marco Ranking sample app](https://github.com/vespa-engine/sample-apps/tree/master/msmarco-ranking).
Calamarii/calamari
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
# MS Marco Ranking with ColBERT on Vespa.ai

The model is based on [ColBERT: Efficient and Effective Passage Search via Contextualized Late Interaction over BERT](https://arxiv.org/abs/2004.12832). This BERT model is based on [google/bert_uncased_L-8_H-512_A-8](https://huggingface.co/google/bert_uncased_L-8_H-512_A-8) and trained using the original [ColBERT training routine](https://github.com/stanford-futuredata/ColBERT/). The model weights have been tuned by training on the `triples.train.small.tar.gz` file from [MSMARCO-Passage-Ranking](https://github.com/microsoft/MSMARCO-Passage-Ranking).

To use this model with vespa.ai for MS Marco Passage Ranking, see the [MS Marco Ranking using Vespa.ai sample app](https://github.com/vespa-engine/sample-apps/tree/master/msmarco-ranking).

# MS Marco Passage Ranking

| MS Marco Passage Ranking Query Set | MRR@10 ColBERT on Vespa.ai |
|------------------------------------|----------------|
| Dev | 0.354 |
| Eval | 0.347 |

The official BM25 baseline achieves MRR@10 of 0.16 on the eval question set and 0.167 on dev. See the [MS Marco Passage Ranking Leaderboard](https://microsoft.github.io/msmarco/).

## Export ColBERT query encoder to ONNX

We represent the ColBERT query encoder in the Vespa runtime to map the textual query representation to the tensor representation. For this we use Vespa's support for running ONNX models. One can use the following snippet to export the model for serving.

```python
from transformers import BertModel
from transformers import BertPreTrainedModel
from transformers import BertConfig
import torch
import torch.nn as nn

class VespaColBERT(BertPreTrainedModel):

    def __init__(self, config):
        super().__init__(config)
        self.bert = BertModel(config)
        self.linear = nn.Linear(config.hidden_size, 32, bias=False)
        self.init_weights()

    def forward(self, input_ids, attention_mask):
        Q = self.bert(input_ids, attention_mask=attention_mask)[0]
        Q = self.linear(Q)
        return torch.nn.functional.normalize(Q, p=2, dim=2)

colbert_query_encoder = VespaColBERT.from_pretrained("vespa-engine/colbert-medium")

# Export model to ONNX for serving in Vespa
input_names = ["input_ids", "attention_mask"]
output_names = ["contextual"]
# Input: at most 32 query terms
input_ids = torch.ones(1, 32, dtype=torch.int64)
attention_mask = torch.ones(1, 32, dtype=torch.int64)
args = (input_ids, attention_mask)
torch.onnx.export(
    colbert_query_encoder,
    args=args,
    f="query_encoder_colbert.onnx",
    input_names=input_names,
    output_names=output_names,
    dynamic_axes={
        "input_ids": {0: "batch"},
        "attention_mask": {0: "batch"},
        "contextual": {0: "batch"},
    },
    opset_version=11,
)
```

# Representing the model on Vespa.ai

See [Ranking with ONNX models](https://docs.vespa.ai/documentation/onnx.html) and the [MS Marco Ranking sample app](https://github.com/vespa-engine/sample-apps/tree/master/msmarco-ranking).
Callidior/bert2bert-base-arxiv-titlegen
[ "pytorch", "safetensors", "encoder-decoder", "text2text-generation", "en", "dataset:arxiv_dataset", "transformers", "summarization", "license:apache-2.0", "autotrain_compatible", "has_space" ]
summarization
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
145
null
---- language: - is thumbnail: tags: - icelandic - qa license: datasets: - ic3 - igc metrics: - em - f1 widget: - text: "Hvenær var Halldór Laxness í menntaskóla ?" context: "Halldór Laxness ( Halldór Kiljan ) fæddist í Reykjavík 23. apríl árið 1902 og átti í fyrstu heima við Laugaveg en árið 1905 settist fjölskyldan að í Laxnesi í Mosfellssveit . Þar ólst Halldór upp en sótti skóla í Reykjavík á unglingsárum . Ungur hélt hann síðan utan og var langdvölum erlendis um árabil – í ýmsum Evrópulöndum og síðar í Ameríku . Þegar hann var heima bjó hann í Reykjavík þar til hann og kona hans , Auður Sveinsdóttir , byggðu sér húsið Gljúfrastein í Mosfellssveit og fluttu þangað árið 1945 . Þar var heimili þeirra alla tíð síðan og þar er nú safn til minningar um þau . Halldór lést 8. febrúar 1998 . Skólaganga Halldórs varð ekki löng . Árið 1918 hóf hann nám við Menntaskólann í Reykjavík en hafði lítinn tíma til að læra , enda var hann að skrifa skáldsögu , Barn náttúrunnar , sem kom út haustið 1919 – þá þegar var höfundurinn ungi farinn af landi brott . Sagan vakti þó nokkra athygli og í Alþýðublaðinu sagði m.a. : „ Og hver veit nema að Halldór frá Laxnesi eigi eftir að verða óskabarn íslensku þjóðarinnar . “ Upp frá þessu sendi Halldór frá sér bók nánast á hverju ári , stundum fleiri en eina , í yfir sex áratugi . Afköst hans voru með eindæmum ; hann skrifaði fjölda skáldsagna , sumar í nokkrum hlutum , leikrit , kvæði , smásagnasöfn og endurminningabækur og gaf auk þess út mörg greinasöfn og ritgerðir . Bækurnar eru fjölbreyttar en eiga það sameiginlegt að vera skrifaðar af einstakri stílgáfu , djúpum mannskilningi og víðtækri þekkingu á sögu og samfélagi . Þar birtast oft afgerandi skoðanir á þjóðfélagsmálum og sögupersónur eru margar einkar eftirminnilegar ; tilsvör þeirra og lunderni hafa orðið samofin þjóðarsálinni . Þekktustu verk Halldórs eru eflaust skáldsögurnar stóru og rismiklu , s.s. Salka Valka , Sjálfstætt fólk , Heimsljós , Íslandsklukkan og Gerpla , og raunar mætti telja upp mun fleiri ; Kvæðabók hans er í uppáhaldi hjá mörgum sem og minningabækurnar sem hann skrifaði á efri árum um æskuár sín ; af þekktum greinasöfnum og ritgerðum má nefna Alþýðubókina og Skáldatíma . Mikið hefur verið skrifað um verk og ævi skáldsins , en hér skal aðeins bent á ítarlega frásögn og greiningu Halldórs Guðmundssonar í bókinni Halldór Laxness – ævisaga ." --- # IceBERT-QA ## Model description This is an Icelandic reading comprehension Q&A model. ## Intended uses & limitations This model is part of my MSc thesis about Q&A for Icelandic. #### How to use ```python from transformers import AutoTokenizer, AutoModelForQuestionAnswering tokenizer = AutoTokenizer.from_pretrained("vesteinn/IceBERT-QA") model = AutoModelForQuestionAnswering.from_pretrained("vesteinn/IceBERT-QA") ``` #### Limitations and bias ## Training data Translated English datasets were used along with the Natural Questions in Icelandic dataset. ## Training procedure ## Eval results ### BibTeX entry and citation info ```bibtex ```
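Building on the loading snippet in the card, one possible way to run inference is through the `question-answering` pipeline (the context below is a shortened excerpt of the widget example; output quality depends on the model):

```python
from transformers import pipeline

qa = pipeline("question-answering", model="vesteinn/IceBERT-QA", tokenizer="vesteinn/IceBERT-QA")

result = qa(
    question="Hvenær var Halldór Laxness í menntaskóla ?",
    context="Árið 1918 hóf hann nám við Menntaskólann í Reykjavík en hafði lítinn tíma til að læra.",
)
print(result["answer"], result["score"])
```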
CallumRai/HansardGPT2
[ "pytorch", "jax", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
--- license: gpl-3.0 tags: - generated_from_trainer metrics: - matthews_correlation model-index: - name: IceBERT-finetuned-iec-sentence-bs16 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # IceBERT-finetuned-iec-sentence-bs16 This model is a fine-tuned version of [vesteinn/IceBERT](https://huggingface.co/vesteinn/IceBERT) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.2508 - Matthews Correlation: 0.8169 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Matthews Correlation | |:-------------:|:-----:|:-----:|:---------------:|:--------------------:| | 0.5278 | 1.0 | 3640 | 0.4777 | 0.5396 | | 0.4648 | 2.0 | 7280 | 0.3886 | 0.6437 | | 0.3807 | 3.0 | 10920 | 0.3478 | 0.7060 | | 0.3061 | 4.0 | 14560 | 0.2523 | 0.8083 | | 0.2477 | 5.0 | 18200 | 0.2508 | 0.8169 | ### Framework versions - Transformers 4.12.3 - Pytorch 1.8.0 - Datasets 1.15.1 - Tokenizers 0.10.3
CalvinHuang/mt5-small-finetuned-amazon-en-es
[ "pytorch", "tensorboard", "mt5", "text2text-generation", "transformers", "summarization", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible" ]
summarization
{ "architectures": [ "MT5ForConditionalGeneration" ], "model_type": "mt5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
16
null
--- license: gpl-3.0 tags: - generated_from_trainer metrics: - matthews_correlation model-index: - name: IceBERT-finetuned-iec-sentence results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # IceBERT-finetuned-iec-sentence This model is a fine-tuned version of [vesteinn/IceBERT](https://huggingface.co/vesteinn/IceBERT) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.4438 - Matthews Correlation: 0.6062 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Matthews Correlation | |:-------------:|:-----:|:----:|:---------------:|:--------------------:| | No log | 1.0 | 455 | 0.5283 | 0.4755 | | 0.5696 | 2.0 | 910 | 0.4889 | 0.5272 | | 0.4898 | 3.0 | 1365 | 0.4508 | 0.5793 | | 0.4508 | 4.0 | 1820 | 0.4340 | 0.6042 | | 0.4153 | 5.0 | 2275 | 0.4438 | 0.6062 | ### Framework versions - Transformers 4.12.3 - Pytorch 1.8.0 - Datasets 1.15.1 - Tokenizers 0.10.3
Cameron/BERT-Jigsaw
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
35
null
--- license: gpl-3.0 tags: - generated_from_trainer datasets: - mim_gold_ner metrics: - precision - recall - f1 - accuracy model-index: - name: IceBERT-finetuned-ner results: - task: name: Token Classification type: token-classification dataset: name: mim_gold_ner type: mim_gold_ner args: mim-gold-ner metrics: - name: Precision type: precision value: 0.8870349771350884 - name: Recall type: recall value: 0.8575696021029992 - name: F1 type: f1 value: 0.8720534629404617 - name: Accuracy type: accuracy value: 0.9848236357672584 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # IceBERT-finetuned-ner This model is a fine-tuned version of [vesteinn/IceBERT](https://huggingface.co/vesteinn/IceBERT) on the mim_gold_ner dataset. It achieves the following results on the evaluation set: - Loss: 0.0815 - Precision: 0.8870 - Recall: 0.8576 - F1: 0.8721 - Accuracy: 0.9848 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.0536 | 1.0 | 2904 | 0.0749 | 0.8749 | 0.8426 | 0.8585 | 0.9831 | | 0.0269 | 2.0 | 5808 | 0.0754 | 0.8734 | 0.8471 | 0.8600 | 0.9840 | | 0.0173 | 3.0 | 8712 | 0.0815 | 0.8870 | 0.8576 | 0.8721 | 0.9848 | ### Framework versions - Transformers 4.11.0 - Pytorch 1.9.0+cu102 - Datasets 1.12.1 - Tokenizers 0.10.3
Cameron/BERT-SBIC-offensive
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
31
null
--- license: gpl-3.0 tags: - generated_from_trainer datasets: - mim_gold_ner metrics: - precision - recall - f1 - accuracy widget: - text: Systurnar Guðrún og Monique átu einar á McDonalds og horfðu á Stöð 2, þar glitti í Bruce Willis leika í Die Hard 2. model-index: - name: IceBERT-finetuned-ner results: - task: name: Token Classification type: token-classification dataset: name: mim_gold_ner type: mim_gold_ner args: mim-gold-ner metrics: - name: Precision type: precision value: 0.9351994710160899 - name: Recall type: recall value: 0.9440427188786294 - name: F1 type: f1 value: 0.9396002878813043 - name: Accuracy type: accuracy value: 0.9920330921021648 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # IceBERT-finetuned-ner This model is a fine-tuned version of [vesteinn/IceBERT](https://huggingface.co/vesteinn/IceBERT) on the mim_gold_ner dataset. It achieves the following results on the evaluation set: - Loss: 0.0347 - Precision: 0.9352 - Recall: 0.9440 - F1: 0.9396 - Accuracy: 0.9920 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.0568 | 1.0 | 2929 | 0.0386 | 0.9114 | 0.9162 | 0.9138 | 0.9897 | | 0.0325 | 2.0 | 5858 | 0.0325 | 0.9300 | 0.9363 | 0.9331 | 0.9912 | | 0.0184 | 3.0 | 8787 | 0.0347 | 0.9352 | 0.9440 | 0.9396 | 0.9920 | ### Framework versions - Transformers 4.11.0 - Pytorch 1.9.0+cu102 - Datasets 1.12.1 - Tokenizers 0.10.3
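A minimal, hypothetical usage sketch for a token-classification checkpoint like the one described above (the repository id is a placeholder, since the card does not state one; the sentence is the widget example):

```python
from transformers import pipeline

# Placeholder id: point this at the fine-tuned NER checkpoint this card describes.
ner = pipeline(
    "token-classification",
    model="path/to/IceBERT-finetuned-ner",
    aggregation_strategy="simple",
)

sentence = "Systurnar Guðrún og Monique átu einar á McDonalds og horfðu á Stöð 2."
for entity in ner(sentence):
    print(entity["entity_group"], entity["word"], round(float(entity["score"]), 3))
```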
Cameron/BERT-SBIC-targetcategory
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
null
--- language: is widget: - text: Má bjóða þér <mask> í kvöld? - text: Forseti <mask> er ágæt. - text: Súpan var <mask> á bragðið. tags: - roberta - icelandic - masked-lm - pytorch license: agpl-3.0 datasets: - mideind/icelandic-common-crawl-corpus-IC3 --- # IceBERT IceBERT was trained with fairseq using the RoBERTa-base architecture. The training data used is shown in the table below. | Dataset | Size | Tokens | |------------------------------------------------------|---------|--------| | Icelandic Gigaword Corpus v20.05 (IGC) | 8.2 GB | 1,388M | | Icelandic Common Crawl Corpus (IC3) | 4.9 GB | 824M | | Greynir News articles | 456 MB | 76M | | Icelandic Sagas | 9 MB | 1.7M | | Open Icelandic e-books (Rafbókavefurinn) | 14 MB | 2.6M | | Data from the medical library of Landspitali | 33 MB | 5.2M | | Student theses from Icelandic universities (Skemman) | 2.2 GB | 367M | | Total | 15.8 GB | 2,664M | If you find this model useful, please cite ``` @inproceedings{snaebjarnarson-etal-2022-warm, title = "A Warm Start and a Clean Crawled Corpus - A Recipe for Good Language Models", author = "Sn{\ae}bjarnarson, V{\'e}steinn and S{\'\i}monarson, Haukur Barri and Ragnarsson, P{\'e}tur Orri and Ing{\'o}lfsd{\'o}ttir, Svanhv{\'\i}t Lilja and J{\'o}nsson, Haukur and Thorsteinsson, Vilhjalmur and Einarsson, Hafsteinn", booktitle = "Proceedings of the Thirteenth Language Resources and Evaluation Conference", month = jun, year = "2022", address = "Marseille, France", publisher = "European Language Resources Association", url = "https://aclanthology.org/2022.lrec-1.464", pages = "4356--4366", } ```
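A minimal fill-mask sketch matching the widget examples above; the id `vesteinn/IceBERT` is the one referenced by the fine-tuned cards elsewhere in this collection and may need to be replaced with this card's actual repository id:

```python
from transformers import pipeline

fill = pipeline("fill-mask", model="vesteinn/IceBERT")  # assumed id, see note above
for prediction in fill("Má bjóða þér <mask> í kvöld?"):
    print(prediction["token_str"], round(prediction["score"], 3))
```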
Cameron/BERT-eec-emotion
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
36
null
---
language:
- is
- da
- sv
- 'no'
- fo
widget:
- text: Fina lilla<mask>, jag vill inte bliva stur.
- text: Nu ved jeg, at du frygter<mask> og end ikke vil nægte mig din eneste søn..
- text: Það er vorhret á<mask>, napur vindur sem hvín.
- text: Ja, Gud signi<mask>, mítt land.
- text: Alle dyrene i<mask> må være venner.
tags:
- roberta
- icelandic
- norwegian
- faroese
- danish
- swedish
- masked-lm
- pytorch
license: agpl-3.0
datasets:
- vesteinn/FC3
- vesteinn/IC3
- mideind/icelandic-common-crawl-corpus-IC3
- NbAiLab/NCC
- DDSC/partial-danish-gigaword-no-twitter
---

# ScandiBERT

Note: the model was updated on 2022-09-27.

The model was trained on the data shown in the table below. The batch size was 8.8k, and the model was trained for 72 epochs on 24 V100 cards for about two weeks.

| Language  | Data                                  | Size   |
|-----------|---------------------------------------|--------|
| Icelandic | See IceBERT paper                     | 16 GB  |
| Danish    | Danish Gigaword Corpus (incl Twitter) | 4,7 GB |
| Norwegian | NCC corpus                            | 42 GB  |
| Swedish   | Swedish Gigaword Corpus               | 3,4 GB |
| Faroese   | FC3 + Sosialurinn + Bible             | 69 MB  |

Note: at an earlier date a half-trained model was uploaded here; it has since been removed and replaced with the updated model.

This is a Scandinavian BERT model trained on a large collection of Danish, Faroese, Icelandic, Norwegian and Swedish text. It is currently the highest-ranking model on the [ScandEval leaderboard](https://scandeval.github.io/pretrained/).

If you find this model useful, please cite

```
@inproceedings{snaebjarnarson-etal-2023-transfer,
    title = "{T}ransfer to a Low-Resource Language via Close Relatives: The Case Study on Faroese",
    author = "Snæbjarnarson, Vésteinn and Simonsen, Annika and Glavaš, Goran and Vulić, Ivan",
    booktitle = "Proceedings of the 24th Nordic Conference on Computational Linguistics (NoDaLiDa)",
    month = "may 22--24",
    year = "2023",
    address = "Tórshavn, Faroe Islands",
    publisher = {Link{\"o}ping University Electronic Press, Sweden},
}
```
Cameron/BERT-jigsaw-identityhate
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
37
null
--- language: - en - is - multilingual license: agpl-3.0 tags: - generated_from_trainer datasets: - glue metrics: - matthews_correlation model-index: - name: XLMR-ENIS-finetuned-cola results: - task: type: text-classification name: Text Classification dataset: name: glue type: glue args: cola metrics: - type: matthews_correlation value: 0.6306425398187112 name: Matthews Correlation --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # XLMR-ENIS-finetuned-cola This model is a fine-tuned version of [vesteinn/XLMR-ENIS](https://huggingface.co/vesteinn/XLMR-ENIS) on the glue dataset. It achieves the following results on the evaluation set: - Loss: 0.7311 - Matthews Correlation: 0.6306 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Matthews Correlation | |:-------------:|:-----:|:----:|:---------------:|:--------------------:| | 0.5216 | 1.0 | 535 | 0.5836 | 0.4855 | | 0.3518 | 2.0 | 1070 | 0.4426 | 0.5962 | | 0.2538 | 3.0 | 1605 | 0.5091 | 0.6110 | | 0.1895 | 4.0 | 2140 | 0.6955 | 0.6136 | | 0.1653 | 5.0 | 2675 | 0.7311 | 0.6306 | ### Framework versions - Transformers 4.10.3 - Pytorch 1.9.0+cu102 - Datasets 1.12.1 - Tokenizers 0.10.3
Cameron/BERT-jigsaw-severetoxic
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
null
--- language: - en - is - multilingual license: agpl-3.0 tags: - generated_from_trainer datasets: - conll2003 metrics: - precision - recall - f1 - accuracy model-index: - name: XLMR-ENIS-finetuned-ner results: - task: type: token-classification name: Token Classification dataset: name: conll2003 type: conll2003 args: conll2003 metrics: - type: precision value: 0.9398313331170938 name: Precision - type: recall value: 0.9517943664285128 name: Recall - type: f1 value: 0.9457750214207026 name: F1 - type: accuracy value: 0.9853686150987764 name: Accuracy --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # XLMR-ENIS-finetuned-ner This model is a fine-tuned version of [vesteinn/XLMR-ENIS](https://huggingface.co/vesteinn/XLMR-ENIS) on the conll2003 dataset. It achieves the following results on the evaluation set: - Loss: 0.0671 - Precision: 0.9398 - Recall: 0.9518 - F1: 0.9458 - Accuracy: 0.9854 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.2825 | 1.0 | 878 | 0.0712 | 0.9220 | 0.9379 | 0.9299 | 0.9815 | | 0.0688 | 2.0 | 1756 | 0.0689 | 0.9354 | 0.9477 | 0.9415 | 0.9839 | | 0.039 | 3.0 | 2634 | 0.0671 | 0.9398 | 0.9518 | 0.9458 | 0.9854 | ### Framework versions - Transformers 4.10.3 - Pytorch 1.9.0+cu102 - Datasets 1.12.1 - Tokenizers 0.10.3
Cameron/BERT-mdgender-convai-ternary
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
38
null
--- license: agpl-3.0 pipeline_tag: sentence-similarity tags: - generated_from_trainer datasets: - glue metrics: - spearmanr model-index: - name: XLMR-ENIS-finetuned-stsb results: - task: name: Text Classification type: text-classification dataset: name: glue type: glue args: stsb metrics: - name: Spearmanr type: spearmanr value: 0.8887885342806044 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # XLMR-ENIS-finetuned-stsb This model is a fine-tuned version of [vesteinn/XLMR-ENIS](https://huggingface.co/vesteinn/XLMR-ENIS) on the glue dataset. It achieves the following results on the evaluation set: - Loss: 0.5232 - Pearson: 0.8915 - Spearmanr: 0.8888 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Pearson | Spearmanr | |:-------------:|:-----:|:----:|:---------------:|:-------:|:---------:| | No log | 1.0 | 360 | 0.6330 | 0.8562 | 0.8570 | | 1.2835 | 2.0 | 720 | 0.6368 | 0.8790 | 0.8781 | | 0.4518 | 3.0 | 1080 | 0.5352 | 0.8883 | 0.8852 | | 0.4518 | 4.0 | 1440 | 0.4881 | 0.8910 | 0.8885 | | 0.288 | 5.0 | 1800 | 0.5232 | 0.8915 | 0.8888 | ### Framework versions - Transformers 4.11.3 - Pytorch 1.9.0+cu111 - Datasets 1.13.0 - Tokenizers 0.10.3
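Since the STS-B head is a single-output regression head, a hedged sketch of scoring a sentence pair looks like this (the repository id is a placeholder; the card does not state one):

```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Placeholder id: substitute the fine-tuned STS-B checkpoint this card describes.
model_id = "path/to/XLMR-ENIS-finetuned-stsb"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

inputs = tokenizer("A man is playing a guitar.", "A person plays an instrument.", return_tensors="pt")
with torch.no_grad():
    similarity = model(**inputs).logits.squeeze().item()  # roughly on the 0-5 STS-B scale
print(similarity)
```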
Camzure/MaamiBot-test
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
---
language:
- is
- en
- multilingual
tags:
- icelandic
- qa
datasets:
- ic3
- igc
metrics:
- em
- f1
widget:
- text: Hverrar trúar var Halldór Laxness ?
  context: 'Halldór Kiljan Laxness was born in 1902 in Reykjavik , the capital of Iceland , but spent his youth in the country . From the age of seventeen on , he travelled and lived abroad , chiefly on the European continent . He was influenced by expressionism and other modern currents in Germany and France . In the mid-twenties he was converted to Catholicism ; his spiritual experiences are reflected in several books of an autobiographical nature , chiefly Undir Helgahnúk ( Under the Holy Mountain ) , 1924 . In 1927 , he published his first important novel , Vefarinn mikli frá Kasmír ( The Great Weaver from Kashmir ) . Laxness’s religious period did not last long ; during a visit to America he became attracted to socialism . Alþýðubókin ( The Book of the People ) , 1929 , is evidence of a change toward a socialist outlook . In 1930 , Laxness settled in Iceland . Laxness’s main achievement consists of three novel cycles written during the thirties , dealing with the people of Iceland . Þú vínviður hreini , 1931 , and Fuglinn í fjörunni , 1932 , ( both translated as Salka Valka ) , tell the story of a poor fisher girl ; Sjálfstætt fólk ( Independent People ) , 1934 - 35 , treats the fortunes of small farmers , whereas the tetralogy Ljós heimsins ( The Light of the World ) , 1937 - 40 , has as its hero an Icelandic folk poet . Laxness’s later works are frequently historical and influenced by the saga tradition : Íslandsklukkan ( The Bell of Iceland ) , 1943 - 46 , Gerpla ( The Happy Warriors ) , 1952 , and Paradísarheimt ( Paradise Reclaimed ) , 1960 . Laxness is also the author of the topical and sharply polemical Atómstöðin ( The Atom Station ) , 1948 .'
---

# XLMr-ENIS-QA-IsQ-EnA

## Model description

This is an Icelandic reading comprehension Q&A model.

## Intended uses & limitations

This model is part of my MSc thesis about Q&A for Icelandic.

#### How to use

```python
from transformers import AutoTokenizer, AutoModelForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("vesteinn/IceBERT-QA")

model = AutoModelForQuestionAnswering.from_pretrained("vesteinn/IceBERT-QA")
```

#### Limitations and bias

## Training data

Translated English datasets were used along with the Natural Questions in Icelandic dataset.

## Training procedure

## Eval results

### BibTeX entry and citation info

```bibtex
```
Capreolus/birch-bert-large-car_mb
[ "pytorch", "tf", "jax", "bert", "next-sentence-prediction", "transformers" ]
null
{ "architectures": [ "BertForNextSentencePrediction" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
---
license: agpl-3.0
language:
- is
---

# word2vec model trained on Icelandic

This model is trained on the lemmas of the Icelandic Gigaword Corpus, version 20.05. It is trained with the gensim package, version 4.1.0, with parameters left at their defaults (100 dimensions, window size 5).

This model cannot be loaded directly through the Hugging Face transformers library, since it is a gensim model; clone the repository and run the following to use it.

```python
import gensim

model = gensim.models.Word2Vec.load("./rmh.w2v.model")
```

## Example output

```bash
In [6]: model.wv.most_similar("england")
Out[6]:
[('wales', 0.8113704323768616),
 ('skotland', 0.7611601948738098),
 ('bretlandseyjar', 0.7280426621437073),
 ('gateshead', 0.6975484490394592),
 ('ástralía', 0.6963852047920227),
 ('eastbourne', 0.6939234137535095),
 ('englandi', 0.6908402442932129),
 ('bath', 0.6849308013916016),
 ('lynndie', 0.6826340556144714),
 ('glasgow', 0.6815919876098633)]

In [7]: model.wv.most_similar("ísland")
Out[7]:
[('norðurlönd', 0.6843729615211487),
 ('land', 0.6696498990058899),
 ('íslendingur', 0.6645756959915161),
 ('íslenskur', 0.6627770662307739),
 ('hérlendis', 0.6609933376312256),
 ('íslandi', 0.6514216661453247),
 ('evrópa', 0.6289927959442139),
 ('fróðskaparsetur', 0.6046777367591858),
 ('evrópuland', 0.5911464095115662),
 ('bandaríkin', 0.5906434655189514)]
```
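Continuing the gensim session above, a couple of optional follow-up queries (assuming the listed lemmas are present in the vocabulary):

```python
# Cosine similarity between two lemmas
print(model.wv.similarity("ísland", "england"))

# Analogy-style query: "reykjavík" is to "ísland" as "oslo" is to ...
print(model.wv.most_similar(positive=["oslo", "ísland"], negative=["reykjavík"], topn=3))
```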
Capreolus/electra-base-msmarco
[ "pytorch", "tf", "electra", "text-classification", "arxiv:2008.09093", "transformers" ]
text-classification
{ "architectures": [ "ElectraForSequenceClassification" ], "model_type": "electra", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
110
null
--- tags: - conversational --- # Jake Peralta DialoGPT Model
dccuchile/albert-base-spanish-finetuned-mldoc
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
34
2021-08-30T11:58:16Z
--- tags: - conversational --- # Gandalf DialoGPT model
CennetOguz/distilbert-base-uncased-finetuned-recipe-1
[ "pytorch", "tensorboard", "distilbert", "fill-mask", "transformers", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "DistilBertForMaskedLM" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2021-08-19T09:07:25Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - null model_index: - name: distilgpt2-finetuned-distilgpt2-med_articles results: - task: name: Causal Language Modeling type: text-generation --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilgpt2-finetuned-distilgpt2-med_articles This model is a fine-tuned version of [vishnun/distilgpt2-finetuned-distilgpt2-med_articles](https://huggingface.co/vishnun/distilgpt2-finetuned-distilgpt2-med_articles) on the None dataset. It achieves the following results on the evaluation set: - Loss: 3.3171 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 65 | 3.3417 | | No log | 2.0 | 130 | 3.3300 | | No log | 3.0 | 195 | 3.3231 | | No log | 4.0 | 260 | 3.3172 | | No log | 5.0 | 325 | 3.3171 | ### Framework versions - Transformers 4.9.2 - Pytorch 1.9.0+cu102 - Datasets 1.11.0 - Tokenizers 0.10.3
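As a hedged usage sketch, text can be sampled from a checkpoint like this one with the `text-generation` pipeline; the id below is the base checkpoint named in the card, and the fine-tuned model would be loaded the same way from its own repository:

```python
from transformers import pipeline

generator = pipeline("text-generation", model="vishnun/distilgpt2-finetuned-distilgpt2-med_articles")
output = generator("Recent studies on hypertension suggest", max_length=50, num_return_sequences=1)
print(output[0]["generated_text"])
```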
CennetOguz/distilbert-base-uncased-finetuned-recipe-accelerate-1
[ "pytorch", "distilbert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "DistilBertForMaskedLM" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - null model_index: - name: distilgpt2-finetuned-tamil-gpt results: - task: name: Causal Language Modeling type: text-generation --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilgpt2-finetuned-tamil-gpt This model is a fine-tuned version of [distilgpt2](https://huggingface.co/distilgpt2) on the None dataset. It achieves the following results on the evaluation set: - Loss: 4.4097 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 228 | 4.4097 | | No log | 2.0 | 456 | 4.4097 | | 4.3169 | 3.0 | 684 | 4.4097 | | 4.3169 | 4.0 | 912 | 4.4097 | | 4.3116 | 5.0 | 1140 | 4.4097 | ### Framework versions - Transformers 4.9.2 - Pytorch 1.9.0+cu102 - Datasets 1.11.0 - Tokenizers 0.10.3
CennetOguz/distilbert-base-uncased-finetuned-recipe-accelerate
[ "pytorch", "distilbert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "DistilBertForMaskedLM" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2021-08-14T04:48:09Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - null model_index: - name: distilgpt2-finetuned-tamilmixsentiment results: - task: name: Causal Language Modeling type: text-generation --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilgpt2-finetuned-tamilmixsentiment This model is a fine-tuned version of [distilgpt2](https://huggingface.co/distilgpt2) on the None dataset. It achieves the following results on the evaluation set: - Loss: 4.4572 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 5.6438 | 1.0 | 907 | 4.8026 | | 4.774 | 2.0 | 1814 | 4.5953 | | 4.5745 | 3.0 | 2721 | 4.5070 | | 4.4677 | 4.0 | 3628 | 4.4688 | | 4.4294 | 5.0 | 4535 | 4.4572 | ### Framework versions - Transformers 4.9.2 - Pytorch 1.9.0+cu102 - Datasets 1.11.0 - Tokenizers 0.10.3
Chaima/TunBerto
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-02-03T15:41:58Z
--- language: - km license: apache-2.0 tags: - automatic-speech-recognition - openslr - robust-speech-event - km - generated_from_trainer - hf-asr-leaderboard datasets: - openslr model-index: - name: wav2vec2-xls-r-1b-km results: - task: name: Speech Recognition type: automatic-speech-recognition dataset: name: OpenSLR km type: openslr args: km metrics: - name: Test WER type: wer value: 32.13 - name: Test CER type: cer value: 9.35 - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Robust Speech Event - Dev Data type: speech-recognition-community-v2/dev_data args: km metrics: - name: Test WER type: wer value: 32.13 - name: Test CER type: cer value: 9.35 --- # This model is a fine-tuned version of [facebook/wav2vec2-xls-r-1b](https://huggingface.co/facebook/wav2vec2-xls-r-1b) on the openslr dataset. It achieves the following results on the evaluation set: - Loss: 0.4239 - Wer: 0.4221 # Evaluation results on OpenSLR "test" (self-split 10%) (Running ./eval.py): - WER: 0.4490281634272114 - CER: 0.12198285179047481 # Evaluation results on OpenSLR "test" with LM ngram (self-split 10%) (Running ./eval.py): - WER: 0.32130107100357 - CER: 0.09345053678218891 # Note - Since this dataset is small (4 hours of voice recording), we decided not to train that for too long to avoid overfitting and under-generalization. - This model performs worse than its 300M-variant. Probably, we don't explore the hyper-parameter enough? ## Installation Install the following libraries on top of HuggingFace Transformers for the supports of language model. ``` pip install pyctcdecode pip install https://github.com/kpu/kenlm/archive/master.zip ``` ## Usage **Approach 1:** Using HuggingFace's pipeline, this will cover everything end-to-end from raw audio input to text output. ```python from transformers import pipeline # Load the model pipe = pipeline(model="vitouphy/wav2vec2-xls-r-300m-khmer") # Process raw audio output = pipe("sound_file.wav", chunk_length_s=10, stride_length_s=(4, 2)) ``` **Approach 2:** More custom way to predict phonemes. ```python from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC import librosa import torch # load model and processor processor = Wav2Vec2Processor.from_pretrained("vitouphy/wav2vec2-xls-r-300m-khmer") model = Wav2Vec2ForCTC.from_pretrained("vitouphy/wav2vec2-xls-r-300m-khmer") # Read and process the input speech_array, sampling_rate = librosa.load("sound_file.wav", sr=16_000) inputs = processor(speech_array, sampling_rate=16_000, return_tensors="pt", padding=True) with torch.no_grad(): logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits predicted_ids = torch.argmax(logits, axis=-1) predicted_sentences = processor.batch_decode(predicted_ids) print(predicted_sentences) ``` ## Intended uses & limitations The data used for this model is only around 4 hours of recordings. - We split into 80/10/10. Hence, the training hour is 3.2 hours, which is very very small. - Yet, its performance is not too bad. Quite interesting for such small dataset, actually. You can try it out. - Its limitation is: - Rare characters, e.g. ឬស្សី ឪឡឹក - Speech needs to be clear and articulate. - More data to cover more vocabulary and character may help improve this system. 
## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 2000 - num_epochs: 75 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 3.5671 | 5.47 | 400 | 12.0218 | 1.0 | | 3.5159 | 10.95 | 800 | 10.6337 | 1.0 | | 2.4543 | 16.43 | 1200 | 1.8256 | 0.9839 | | 1.9437 | 21.91 | 1600 | 1.1237 | 0.9173 | | 1.696 | 27.39 | 2000 | 0.8246 | 0.7700 | | 1.5342 | 32.87 | 2400 | 0.6433 | 0.6594 | | 1.4509 | 38.35 | 2800 | 0.5500 | 0.5787 | | 1.3478 | 43.83 | 3200 | 0.5070 | 0.4907 | | 1.3096 | 49.31 | 3600 | 0.4692 | 0.4726 | | 1.2532 | 54.79 | 4000 | 0.4448 | 0.4479 | | 1.2291 | 60.27 | 4400 | 0.4374 | 0.4366 | | 1.196 | 65.75 | 4800 | 0.4314 | 0.4310 | | 1.1862 | 71.23 | 5200 | 0.4239 | 0.4221 | ### Framework versions - Transformers 4.17.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.18.2.dev0 - Tokenizers 0.11.0
chainyo/speaker-recognition-meetup
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- language: - en license: apache-2.0 tags: - automatic-speech-recognition - en - generated_from_trainer - hf-asr-leaderboard - librispeech_asr - robust-speech-event datasets: - librispeech_asr model-index: - name: XLS-R-300M - English results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: LibriSpeech (clean) type: librispeech_asr config: clean split: test args: language: en metrics: - name: Test WER type: wer value: 12.29 - name: Test CER type: cer value: 3.34 - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Robust Speech Event - Dev Data type: speech-recognition-community-v2/dev_data args: en metrics: - name: Validation WER type: wer value: 36.75 - name: Validation CER type: cer value: 14.83 - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Common Voice 8.0 type: mozilla-foundation/common_voice_8_0 config: en split: test args: language: en metrics: - name: Test WER type: wer value: 37.81 - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Robust Speech Event - Test Data type: speech-recognition-community-v2/eval_data args: en metrics: - name: Test WER type: wer value: 38.8 --- # This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the librispeech_asr dataset. It achieves the following results on the evaluation set: - Loss: 0.1444 - Wer: 0.1167 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1000 - num_epochs: 50 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 2.9365 | 4.17 | 500 | 2.9398 | 0.9999 | | 1.5444 | 8.33 | 1000 | 0.5947 | 0.4289 | | 1.1367 | 12.5 | 1500 | 0.2751 | 0.2366 | | 0.9972 | 16.66 | 2000 | 0.2032 | 0.1797 | | 0.9118 | 20.83 | 2500 | 0.1786 | 0.1479 | | 0.8664 | 24.99 | 3000 | 0.1641 | 0.1408 | | 0.8251 | 29.17 | 3500 | 0.1537 | 0.1267 | | 0.793 | 33.33 | 4000 | 0.1525 | 0.1244 | | 0.785 | 37.5 | 4500 | 0.1470 | 0.1184 | | 0.7612 | 41.66 | 5000 | 0.1446 | 0.1177 | | 0.7478 | 45.83 | 5500 | 0.1449 | 0.1176 | | 0.7443 | 49.99 | 6000 | 0.1444 | 0.1167 | ### Framework versions - Transformers 4.17.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.18.2.dev0 - Tokenizers 0.11.0
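The card above does not include an inference snippet; a minimal, hedged sketch with the ASR pipeline is shown below. The Hub id and audio file name are placeholders.

```python
from transformers import pipeline

# Placeholder: substitute this checkpoint's actual Hub id
asr = pipeline("automatic-speech-recognition", model="<this-model-hub-id>")

# Chunking keeps memory bounded for long recordings
print(asr("sample.wav", chunk_length_s=10, stride_length_s=(4, 2)))
```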
ChaitanyaU/FineTuneLM
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-02-01T07:20:48Z
--- language: - ja license: apache-2.0 tags: - automatic-speech-recognition - generated_from_trainer - hf-asr-leaderboard - ja - mozilla-foundation/common_voice_8_0 - robust-speech-event datasets: - mozilla-foundation/common_voice_8_0 model-index: - name: XLS-R-300M - Japanese results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Common Voice 8 type: mozilla-foundation/common_voice_8_0 args: ja metrics: - name: Test WER type: wer value: 54.05 - name: Test CER type: cer value: 27.54 - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Robust Speech Event - Dev Data type: speech-recognition-community-v2/dev_data args: ja metrics: - name: Validation WER type: wer value: 48.77 - name: Validation CER type: cer value: 24.87 - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Robust Speech Event - Test Data type: speech-recognition-community-v2/eval_data args: ja metrics: - name: Test CER type: cer value: 27.36 --- # This model is for transcribing audio into Hiragana, one format of Japanese language. This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the `mozilla-foundation/common_voice_8_0 dataset`. Note that the following results are achieved by: - Modify `eval.py` to suit the use case. - Since kanji and katakana shares the same sound as hiragana, we convert all texts to hiragana using [pykakasi](https://pykakasi.readthedocs.io) and tokenize them using [fugashi](https://github.com/polm/fugashi). It achieves the following results on the evaluation set: - Loss: 0.7751 - Cer: 0.2227 # Evaluation results (Running ./eval.py): | Model | Metric | Common-Voice-8/test | speech-recognition-community-v2/dev-data | |:--------:|:------:|:-------------------:|:------------------------------------------:| | w/o LM | WER | 0.5964 | 0.5532 | | | CER | 0.2944 | 0.2629 | | w/ LM | WER | 0.5405 | 0.4877 | | | CER | **0.2754** | **0.2487** | ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1000 - training_steps: 4000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Cer | |:-------------:|:-----:|:-----:|:---------------:|:------:| | 4.4081 | 1.6 | 500 | 4.0983 | 1.0 | | 3.303 | 3.19 | 1000 | 3.3563 | 1.0 | | 3.1538 | 4.79 | 1500 | 3.2066 | 0.9239 | | 2.1526 | 6.39 | 2000 | 1.1597 | 0.3355 | | 1.8726 | 7.98 | 2500 | 0.9023 | 0.2505 | | 1.7817 | 9.58 | 3000 | 0.8219 | 0.2334 | | 1.7488 | 11.18 | 3500 | 0.7915 | 0.2222 | | 1.7039 | 12.78 | 4000 | 0.7751 | 0.2227 | | Stop & Train | | | | | | 1.6571 | 15.97 | 5000 | 0.6788 | 0.1685 | | 1.520400 | 19.16 | 6000 | 0.6095 | 0.1409 | | 1.448200 | 22.35 | 7000 | 0.5843 | 0.1430 | | 1.385400 | 25.54 | 8000 | 0.5699 | 0.1263 | | 1.354200 | 28.73 | 9000 | 0.5686 | 0.1219 | | 1.331500 | 31.92 | 10000 | 0.5502 | 0.1144 | | 1.290800 | 35.11 | 11000 | 0.5371 | 0.1140 | | Stop & Train | | | | | | 1.235200 | 38.30 | 12000 | 0.5394 | 0.1106 | ### Framework versions 
- Transformers 4.17.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.18.2.dev0 - Tokenizers 0.11.0
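Since the evaluation described above normalizes references and predictions to hiragana with pykakasi (tokenization with fugashi is omitted here), a hedged sketch of that normalization step follows. It assumes the pykakasi 2.x API; the example text is arbitrary and not taken from the dataset.

```python
import pykakasi

kks = pykakasi.kakasi()

def to_hiragana(text: str) -> str:
    # convert() yields chunks with several readings; keep the hiragana one
    return "".join(item["hira"] for item in kks.convert(text))

print(to_hiragana("音声認識"))  # kanji rendered as hiragana
```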
Chakita/Friends
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
2022-02-02T03:32:17Z
--- language: - km license: apache-2.0 tags: - automatic-speech-recognition - openslr - robust-speech-event - km - generated_from_trainer - hf-asr-leaderboard model-index: - name: xls-r-300m-km results: - task: name: Speech Recognition type: automatic-speech-recognition dataset: name: OpenSLR km type: openslr args: km metrics: - name: Test WER type: wer value: 25.7 - name: Test CER type: cer value: 7.03 - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Robust Speech Event - Dev Data type: speech-recognition-community-v2/dev_data args: km metrics: - name: Test WER type: wer value: 25.7 - name: Test CER type: cer value: 7.03 --- # This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the openslr dataset. It achieves the following results on the evaluation set: - Loss: 0.3281 - Wer: 0.3462 # Evaluation results on OpenSLR "test" (self-split 10%) (Running ./eval.py): - WER: 0.3216977389924633 - CER: 0.08653361193169537 # Evaluation results with language model on OpenSLR "test" (self-split 10%) (Running ./eval.py): - WER: 0.257040856802856 - CER: 0.07025001801282513 ## Installation Install the following libraries on top of HuggingFace Transformers to support decoding with a language model. ``` pip install pyctcdecode pip install https://github.com/kpu/kenlm/archive/master.zip ``` ## Usage **Approach 1:** Use HuggingFace's pipeline, which covers everything end-to-end from raw audio input to text output. ```python from transformers import pipeline # Load the model pipe = pipeline(model="vitouphy/wav2vec2-xls-r-300m-khmer") # Process raw audio output = pipe("sound_file.wav", chunk_length_s=10, stride_length_s=(4, 2)) ``` **Approach 2:** A more customizable way to predict phonemes. ```python from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC import librosa import torch # load model and processor processor = Wav2Vec2Processor.from_pretrained("vitouphy/wav2vec2-xls-r-300m-khmer") model = Wav2Vec2ForCTC.from_pretrained("vitouphy/wav2vec2-xls-r-300m-khmer") # Read and process the input speech_array, sampling_rate = librosa.load("sound_file.wav", sr=16_000) inputs = processor(speech_array, sampling_rate=16_000, return_tensors="pt", padding=True) with torch.no_grad(): logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits predicted_ids = torch.argmax(logits, axis=-1) predicted_sentences = processor.batch_decode(predicted_ids) print(predicted_sentences) ``` ## Intended uses & limitations The data used for this model is only around 4 hours of recordings. - We split it 80/10/10, so the training data amounts to roughly 3.2 hours, which is very small. - Yet, its performance is not too bad, which is quite interesting for such a small dataset. You can try it out. - Its limitations are: - Rare characters, e.g. ឬស្សី ឪឡឹក - Speech needs to be clear and articulate. - More data covering more vocabulary and characters may help improve this system.
## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1000 - num_epochs: 100 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 5.0795 | 5.47 | 400 | 4.4121 | 1.0 | | 3.5658 | 10.95 | 800 | 3.5203 | 1.0 | | 3.3689 | 16.43 | 1200 | 2.8984 | 0.9996 | | 2.01 | 21.91 | 1600 | 1.0041 | 0.7288 | | 1.6783 | 27.39 | 2000 | 0.6941 | 0.5989 | | 1.527 | 32.87 | 2400 | 0.5599 | 0.5282 | | 1.4278 | 38.35 | 2800 | 0.4827 | 0.4806 | | 1.3458 | 43.83 | 3200 | 0.4429 | 0.4532 | | 1.2893 | 49.31 | 3600 | 0.4156 | 0.4330 | | 1.2441 | 54.79 | 4000 | 0.4020 | 0.4040 | | 1.188 | 60.27 | 4400 | 0.3777 | 0.3866 | | 1.1628 | 65.75 | 4800 | 0.3607 | 0.3858 | | 1.1324 | 71.23 | 5200 | 0.3534 | 0.3604 | | 1.0969 | 76.71 | 5600 | 0.3428 | 0.3624 | | 1.0897 | 82.19 | 6000 | 0.3387 | 0.3567 | | 1.0625 | 87.66 | 6400 | 0.3339 | 0.3499 | | 1.0601 | 93.15 | 6800 | 0.3288 | 0.3446 | | 1.0474 | 98.62 | 7200 | 0.3281 | 0.3462 | ### Framework versions - Transformers 4.17.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.18.2.dev0 - Tokenizers 0.11.0
Chakita/KannadaBERT
[ "pytorch", "roberta", "fill-mask", "transformers", "masked-lm", "fill-in-the-blanks", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: distilbert-base-uncased-finetuned-squad results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-squad This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the squad dataset. It achieves the following results on the evaluation set: - Loss: 1.1610 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 1.2137 | 1.0 | 5533 | 1.1625 | | 0.9496 | 2.0 | 11066 | 1.1263 | | 0.7591 | 3.0 | 16599 | 1.1610 | ### Framework versions - Transformers 4.13.0.dev0 - Pytorch 1.10.0+cu102 - Datasets 1.15.1 - Tokenizers 0.10.3
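A minimal, hedged usage sketch for this extractive QA checkpoint is shown below; the Hub id is a placeholder since the card does not state it, and the question/context pair is invented.

```python
from transformers import pipeline

qa = pipeline("question-answering", model="<this-model-hub-id>")

result = qa(
    question="Which dataset was the model fine-tuned on?",
    context="This DistilBERT checkpoint was fine-tuned on the SQuAD dataset for extractive question answering.",
)
print(result["answer"], result["score"])
```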
Cheatham/xlm-roberta-large-finetuned-d12
[ "pytorch", "xlm-roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "XLMRobertaForSequenceClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
20
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad_v2 model-index: - name: distilbert-base-uncased-finetuned-squad results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-squad This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the squad_v2 dataset. It achieves the following results on the evaluation set: - Loss: 1.4488 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 1.2159 | 1.0 | 8235 | 1.2378 | | 0.9389 | 2.0 | 16470 | 1.3452 | | 0.7499 | 3.0 | 24705 | 1.4488 | ### Framework versions - Transformers 4.16.2 - Pytorch 1.10.0+cu111 - Datasets 1.18.3 - Tokenizers 0.11.6
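Because SQuAD v2 includes unanswerable questions, a hedged usage sketch that lets the pipeline return an empty answer is shown below; the Hub id and example texts are placeholders.

```python
from transformers import pipeline

qa = pipeline("question-answering", model="<this-model-hub-id>")

result = qa(
    question="Who signed the contract?",
    context="Quarterly revenue grew in every region this year.",
    handle_impossible_answer=True,  # permit an empty answer when the context offers none
)
print(result)  # an empty "answer" string signals "no answer"
```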
Chester/traffic-rec
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
GPT model developed in [Language Models are Few-Shot Butlers](https://arxiv.org/abs/2104.07972).
Chikita1/www_stash_stock
[ "license:bsd-3-clause-clear" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: multilingual datasets: - common_voice tags: - speech - automatic-speech-recognition license: apache-2.0 --- # Wav2Vec2-XLSR-53 [Facebook's XLSR-Wav2Vec2](https://ai.facebook.com/blog/wav2vec-20-learning-the-structure-of-speech-from-raw-audio/) The base model pretrained on 16kHz sampled speech audio. When using the model make sure that your speech input is also sampled at 16Khz. Note that this model should be fine-tuned on a downstream task, like Automatic Speech Recognition. Check out [this blog](https://huggingface.co/blog/fine-tune-wav2vec2-english) for more information. [Paper](https://arxiv.org/abs/2006.13979) Authors: Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli **Abstract** This paper presents XLSR which learns cross-lingual speech representations by pretraining a single model from the raw waveform of speech in multiple languages. We build on wav2vec 2.0 which is trained by solving a contrastive task over masked latent speech representations and jointly learns a quantization of the latents shared across languages. The resulting model is fine-tuned on labeled data and experiments show that cross-lingual pretraining significantly outperforms monolingual pretraining. On the CommonVoice benchmark, XLSR shows a relative phoneme error rate reduction of 72% compared to the best known results. On BABEL, our approach improves word error rate by 16% relative compared to a comparable system. Our approach enables a single multilingual speech recognition model which is competitive to strong individual models. Analysis shows that the latent discrete speech representations are shared across languages with increased sharing for related languages. We hope to catalyze research in low-resource speech understanding by releasing XLSR-53, a large model pretrained in 53 languages. The original model can be found under https://github.com/pytorch/fairseq/tree/master/examples/wav2vec#wav2vec-20. # Usage See [this notebook](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/Fine_Tune_XLSR_Wav2Vec2_on_Turkish_ASR_with_%F0%9F%A4%97_Transformers.ipynb) for more information on how to fine-tune the model. ![model image](https://raw.githubusercontent.com/patrickvonplaten/scientific_images/master/xlsr_wav2vec2.png)
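As a hedged illustration of the fine-tuning starting point described above, the sketch below extracts pretrained speech representations (no CTC head) from the public `facebook/wav2vec2-large-xlsr-53` checkpoint; the one-second silent waveform is a stand-in for real 16 kHz audio.

```python
from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2Model
import numpy as np
import torch

name = "facebook/wav2vec2-large-xlsr-53"
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(name)
model = Wav2Vec2Model.from_pretrained(name)

waveform = np.zeros(16_000, dtype=np.float32)  # placeholder for real speech
inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors="pt")

with torch.no_grad():
    hidden_states = model(**inputs).last_hidden_state  # (batch, frames, hidden_size)
print(hidden_states.shape)
```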
Chinmay/mlindia
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
# cross_encoder-msmarco-distilbert-word2vec256k-MLM_400k This CrossEncoder was trained with MarginMSE loss from the [vocab-transformers/msmarco-distilbert-word2vec256k-MLM_400k](https://hf.co/vocab-transformers/msmarco-distilbert-word2vec256k-MLM_400k) checkpoint. **The word embedding matrix was frozen during training.** You can load the model with [sentence-transformers](https://sbert.net) (set `model_name` to this model's Hub id): ```python from sentence_transformers import CrossEncoder from torch import nn model_name = "<this-model-hub-id>" model = CrossEncoder(model_name, default_activation_function=nn.Identity()) ``` Performance on TREC Deep Learning (nDCG@10): - TREC-DL 19: 72.62 - TREC-DL 20: 73.22
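As a follow-up usage sketch, the cross-encoder can score and re-rank (query, passage) pairs; the Hub id, query, and passages below are placeholders.

```python
from sentence_transformers import CrossEncoder
from torch import nn

model = CrossEncoder("<this-model-hub-id>", default_activation_function=nn.Identity())

query = "how long do elephants live"
passages = [
    "African elephants can live up to 70 years in the wild.",
    "The Eiffel Tower was completed in 1889.",
]

# One relevance score per (query, passage) pair; higher means more relevant
scores = model.predict([(query, p) for p in passages])
for passage, score in sorted(zip(passages, scores), key=lambda x: x[1], reverse=True):
    print(f"{score:.2f}  {passage}")
```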
Chiuchiyin/DialoGPT-small-Donald
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
# cross_encoder-msmarco-distilbert-word2vec256k-MLM_785k_emb_updated This CrossEncoder was trained with MarginMSE loss from the [vocab-transformers/msmarco-distilbert-word2vec256k-MLM_785k_emb_updated](https://hf.co/vocab-transformers/msmarco-distilbert-word2vec256k-MLM_785k_emb_updated) checkpoint. **The word embedding matrix was updated during training.** You can load the model with [sentence-transformers](https://sbert.net) (set `model_name` to this model's Hub id): ```python from sentence_transformers import CrossEncoder from torch import nn model_name = "<this-model-hub-id>" model = CrossEncoder(model_name, default_activation_function=nn.Identity()) ``` Performance on TREC Deep Learning (nDCG@10): - TREC-DL 19: 71.65 - TREC-DL 20: 73.6
Chiuchiyin/Donald
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
# cross_encoder-msmarco-word2vec256k This CrossEncoder was trained with MarginMSE loss from the [nicoladecao/msmarco-word2vec256000-distilbert-base-uncased](https://hf.co/nicoladecao/msmarco-word2vec256000-distilbert-base-uncased) checkpoint. **The word embedding matrix was frozen during training.** You can load the model with [sentence-transformers](https://sbert.net) (set `model_name` to this model's Hub id): ```python from sentence_transformers import CrossEncoder from torch import nn model_name = "<this-model-hub-id>" model = CrossEncoder(model_name, default_activation_function=nn.Identity()) ``` Performance on TREC Deep Learning (nDCG@10): - TREC-DL 19: 72.49 - TREC-DL 20: 72.71
ChoboAvenger/DialoGPT-small-DocBot
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # dense_encoder-msmarco-bert-base-word2vec256k **Note: Token embeddings where updated!** This model is based on [msmarco-word2vec256000-bert-base-uncased](https://huggingface.co/nicoladecao/msmarco-word2vec256000-bert-base-uncased) with a 256k sized vocabulary initialized with word2vec. It has been trained on MS MARCO using [MarginMSELoss](https://github.com/UKPLab/sentence-transformers/blob/master/examples/training/ms_marco/train_bi-encoder_margin-mse.py). See the train_script.py in this repository. Performance: - MS MARCO dev: (evaluating) (MRR@10) - TREC-DL 2019: 67.56 (nDCG@10) - TREC-DL 2020: 71.26 (nDCG@10) ## Usage (Sentence-Transformers) This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') model = AutoModel.from_pretrained('{MODEL_NAME}') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 15716 with parameters: ``` {'batch_size': 32, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.MarginMSELoss.MarginMSELoss` Parameters of the fit()-Method: ``` { "epochs": 30, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'transformers.optimization.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 1000, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 250, 'do_lower_case': False}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
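A short, hedged sketch of using this dense encoder for semantic search follows; the corpus and query are invented, and `{MODEL_NAME}` is the same placeholder used above.

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer('{MODEL_NAME}')

corpus = [
    "Python is a popular programming language.",
    "The capital of France is Paris.",
]
query = "Which language is widely used for scripting?"

corpus_embeddings = model.encode(corpus, convert_to_tensor=True)
query_embedding = model.encode(query, convert_to_tensor=True)

# Cosine similarity between the query and every corpus entry
scores = util.cos_sim(query_embedding, corpus_embeddings)[0]
best = int(scores.argmax())
print(corpus[best], float(scores[best]))
```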
ChoboAvenger/DialoGPT-small-joshua
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # dense_encoder-msmarco-distilbert-word2vec256k-MLM_210k **Note: Token embeddings where updated!** This model is based on [vocab-transformers/msmarco-distilbert-word2vec256k-MLM_210k](https://huggingface.co/vocab-transformers/msmarco-distilbert-word2vec256k-MLM_210k) with a 256k sized vocabulary initialized with word2vec that has been trained with MLM for 210k. It has been trained on MS MARCO using [MarginMSELoss](https://github.com/UKPLab/sentence-transformers/blob/master/examples/training/ms_marco/train_bi-encoder_margin-mse.py). See the train_script.py in this repository. Performance: - MS MARCO dev: 34.91 (MRR@10) - TREC-DL 2019: 67.56 (nDCG@10) - TREC-DL 2020: 68.18 (nDCG@10) ## Usage (Sentence-Transformers) This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') model = AutoModel.from_pretrained('{MODEL_NAME}') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 7858 with parameters: ``` {'batch_size': 64, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.MarginMSELoss.MarginMSELoss` Parameters of the fit()-Method: ``` { "epochs": 30, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'transformers.optimization.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 1000, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 250, 'do_lower_case': False}) with Transformer model: DistilBertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
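For readers curious about the MarginMSE objective referenced above, a hedged sketch of how such training is typically wired in sentence-transformers follows; the triplet and margin value are invented, and the authoritative version is the train_script.py in this repository.

```python
from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer, InputExample, losses

model = SentenceTransformer('{MODEL_NAME}')

# Each example: (query, positive passage, negative passage) with the gold margin
# between a cross-encoder's scores for the positive and the negative passage.
train_examples = [
    InputExample(
        texts=[
            "what is the boiling point of water",
            "Water boils at 100 degrees Celsius at sea level.",
            "The Great Wall of China is thousands of kilometres long.",
        ],
        label=8.2,  # invented margin, for illustration only
    ),
]

train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=1)
train_loss = losses.MarginMSELoss(model)

model.fit(
    train_objectives=[(train_dataloader, train_loss)],
    epochs=1,
    warmup_steps=10,
)
```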
ChrisP/xlm-roberta-base-finetuned-marc-en
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-02-21T09:09:23Z
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # dense_encoder-msmarco-distilbert-word2vec256k-MLM_445k This model is based on [vocab-transformers/msmarco-distilbert-word2vec256k-MLM_445k](https://huggingface.co/vocab-transformers/msmarco-distilbert-word2vec256k-MLM_445k) with a 256k sized vocabulary initialized with word2vec that has been trained with MLM for 445k steps. **Note: Token embeddings where updated!** It has been trained on MS MARCO using [MarginMSELoss](https://github.com/UKPLab/sentence-transformers/blob/master/examples/training/ms_marco/train_bi-encoder_margin-mse.py). See the train_script.py in this repository. **Note: Token embeddings where updated!** Performance: - MS MARCO dev: 34.94 (MRR@10) - TREC-DL 2019: 66.72 (nDCG@10) - TREC-DL 2020: 69.14 (nDCG@10) ## Usage (Sentence-Transformers) This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') model = AutoModel.from_pretrained('{MODEL_NAME}') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 7858 with parameters: ``` {'batch_size': 64, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.MarginMSELoss.MarginMSELoss` Parameters of the fit()-Method: ``` { "epochs": 30, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'transformers.optimization.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 1000, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 250, 'do_lower_case': False}) with Transformer model: DistilBertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
ChrisVCB/DialoGPT-medium-cmjs
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # dense_encoder-msmarco-distilbert-word2vec256k-MLM_785k_emb_updated **Note: Token embeddings where updated!** This model is based on [vocab-transformers/msmarco-distilbert-word2vec256k-MLM_785k_emb_updated](https://huggingface.co/vocab-transformers/msmarco-distilbert-word2vec256k-MLM_785k_emb_updated) with a 256k sized vocabulary initialized with word2vec that has been trained with MLM for 785k. It has been trained on MS MARCO using [MarginMSELoss](https://github.com/UKPLab/sentence-transformers/blob/master/examples/training/ms_marco/train_bi-encoder_margin-mse.py). See the train_script.py in this repository. Performance: - MS MARCO dev: 35.20 (MRR@10) - TREC-DL 2019: 67.61 (nDCG@10) - TREC-DL 2020: 69.62 (nDCG@10) # {MODEL_NAME} This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') model = AutoModel.from_pretrained('{MODEL_NAME}') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 7858 with parameters: ``` {'batch_size': 64, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.MarginMSELoss.MarginMSELoss` Parameters of the fit()-Method: ``` { "epochs": 30, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'transformers.optimization.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 1000, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 250, 'do_lower_case': False}) with Transformer model: DistilBertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
ChrisVCB/DialoGPT-medium-ej
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # dense_encoder-msmarco-distilbert-word2vec256k This model is based on [msmarco-word2vec256000-distilbert-base-uncased](https://huggingface.co/nicoladecao/msmarco-word2vec256000-distilbert-base-uncased) with a 256k sized vocabulary initialized with word2vec. It has been trained on MS MARCO using [MarginMSELoss](https://github.com/UKPLab/sentence-transformers/blob/master/examples/training/ms_marco/train_bi-encoder_margin-mse.py). See the train_script.py in this repository. Performance: - MS MARCO dev: - (MRR@10) - TREC-DL 2019: 65.53 (nDCG@10) - TREC-DL 2020: 67.42 (nDCG@10) - Avg. on 4 BEIR datasets: 38.97 The word embedding matrix has been frozen while training. ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') model = AutoModel.from_pretrained('{MODEL_NAME}') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 7858 with parameters: ``` {'batch_size': 64, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.MarginMSELoss.MarginMSELoss` Parameters of the fit()-Method: ``` { "epochs": 30, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'transformers.optimization.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 1000, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 250, 'do_lower_case': False}) with Transformer model: DistilBertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
ChristianOrr/madnet_keras
[ "tensorboard", "dataset:flyingthings-3d", "dataset:kitti", "arxiv:1810.05424", "vision", "deep-stereo", "depth-estimation", "Tensorflow2", "Keras", "license:apache-2.0" ]
depth-estimation
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # dense_encoder-msmarco-distilbert-word2vec256k **Note: Token embeddings where updated!** This model is based on [msmarco-word2vec256000-distilbert-base-uncased](https://huggingface.co/nicoladecao/msmarco-word2vec256000-distilbert-base-uncased) with a 256k sized vocabulary initialized with word2vec. It has been trained on MS MARCO using [MarginMSELoss](https://github.com/UKPLab/sentence-transformers/blob/master/examples/training/ms_marco/train_bi-encoder_margin-mse.py). See the train_script.py in this repository. Performance: - MS MARCO dev: 34.51 (MRR@10) - TREC-DL 2019: 66.12 (nDCG@10) - TREC-DL 2020: 68.62 (nDCG@10) ## Usage (Sentence-Transformers) This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') model = AutoModel.from_pretrained('{MODEL_NAME}') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 7858 with parameters: ``` {'batch_size': 64, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.MarginMSELoss.MarginMSELoss` Parameters of the fit()-Method: ``` { "epochs": 30, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'transformers.optimization.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 1000, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 250, 'do_lower_case': False}) with Transformer model: DistilBertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
ChristopherA08/IndoELECTRA
[ "pytorch", "electra", "pretraining", "id", "dataset:oscar", "transformers" ]
null
{ "architectures": [ "ElectraForPreTraining" ], "model_type": "electra", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
# Model This model is based on [nicoladecao/msmarco-word2vec256000-distilbert-base-uncased](https://huggingface.co/nicoladecao/msmarco-word2vec256000-distilbert-base-uncased) with a 256k-sized vocabulary initialized with word2vec. This model has been trained with MLM on the MS MARCO corpus collection for 210k steps. See train_mlm.py for the training script. It was run on 2x V100 GPUs. **Note: Token embeddings were updated!**
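A hedged sketch of this kind of continued MLM pretraining is shown below; the corpus file, sequence length, and batch size are placeholders, and the authoritative version is train_mlm.py in the repository.

```python
from datasets import load_dataset
from transformers import (AutoModelForMaskedLM, AutoTokenizer,
                          DataCollatorForLanguageModeling, Trainer, TrainingArguments)

name = "nicoladecao/msmarco-word2vec256000-distilbert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(name)
model = AutoModelForMaskedLM.from_pretrained(name)

# Placeholder corpus: one MS MARCO passage per line in a local text file
dataset = load_dataset("text", data_files={"train": "msmarco_passages.txt"})["train"]
dataset = dataset.map(
    lambda batch: tokenizer(batch["text"], truncation=True, max_length=256),
    batched=True,
    remove_columns=["text"],
)

collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15)

trainer = Trainer(
    model=model,
    args=TrainingArguments(output_dir="mlm_out", max_steps=210_000,
                           per_device_train_batch_size=32, fp16=True),
    train_dataset=dataset,
    data_collator=collator,
)
trainer.train()
```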
Chuah/DialoGPT-small-harrypotter
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
# Model This model is based on [nicoladecao/msmarco-word2vec256000-distilbert-base-uncased](https://huggingface.co/nicoladecao/msmarco-word2vec256000-distilbert-base-uncased) with a 256k-sized vocabulary initialized with word2vec. It has been trained with MLM on the MS MARCO corpus collection for 230k steps; see train_mlm.py for the training script. Training was run on 2x V100 GPUs. The word embedding matrix was kept frozen during training (see the sketch below).
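The card states that the word embedding matrix was frozen during MLM training. The sketch below shows one way such freezing can be set up with `transformers`; the checkpoint name is the base model cited in the card, and the freezing step is illustrative rather than the author's exact training code.

```python
from transformers import AutoModelForMaskedLM

# Base checkpoint named in the card; the freezing step itself is an illustration.
model = AutoModelForMaskedLM.from_pretrained(
    "nicoladecao/msmarco-word2vec256000-distilbert-base-uncased"
)

# Freeze the 256k-entry word embedding matrix so MLM training only updates
# the transformer layers and the MLM head.
model.get_input_embeddings().weight.requires_grad = False

trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f"Trainable parameters after freezing: {trainable:,}")
```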
ChukSamuels/DialoGPT-small-Dr.FauciBot
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
# Model This model is based on [nicoladecao/msmarco-word2vec256000-distilbert-base-uncased](https://huggingface.co/nicoladecao/msmarco-word2vec256000-distilbert-base-uncased) with a 256k-sized vocabulary initialized with word2vec. It has been trained with MLM on the MS MARCO corpus collection for 400k steps; see train_mlm.py for the training script. Training was run on 2x V100 GPUs. The word embedding matrix was kept frozen during training.
Chun/DialoGPT-large-dailydialog
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
2022-02-21T08:52:58Z
# Model This model is based on [nicoladecao/msmarco-word2vec256000-distilbert-base-uncased](https://huggingface.co/nicoladecao/msmarco-word2vec256000-distilbert-base-uncased) with a 256k-sized vocabulary initialized with word2vec. It has been trained with MLM on the MS MARCO corpus collection for 445k steps; see train_mlm.py for the training script. Training was run on 2x V100 GPUs. **Note: Token embeddings were updated!**
Chun/DialoGPT-medium-dailydialog
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
15
2022-02-21T08:55:13Z
# Model This model is based on [nicoladecao/msmarco-word2vec256000-distilbert-base-uncased](https://huggingface.co/nicoladecao/msmarco-word2vec256000-distilbert-base-uncased) with a 256k-sized vocabulary initialized with word2vec. It has been trained with MLM on the MS MARCO corpus collection for 785k steps; see train_mlm.py for the training script. Training was run on 2x V100 GPUs. **Note: Token embeddings were updated!**
Chun/w-en2zh-hsk
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- language: zh pipeline_tag: fill-mask widget: - text: "今天[MASK]情很好" --- # albert_chinese_base This is an albert_chinese_base model from [Google's github](https://github.com/google-research/ALBERT), converted by huggingface's [script](https://github.com/huggingface/transformers/blob/master/src/transformers/convert_albert_original_tf_checkpoint_to_pytorch.py). ## Notice *Support AutoTokenizer* Since sentencepiece is not used in this model, you have to load BertTokenizer instead of AlbertTokenizer. We can verify this with a MaskedLM example. 由於 albert_chinese_base 模型沒有用 sentencepiece 用AlbertTokenizer會載不進詞表,因此需要改用BertTokenizer !!! 我們可以跑MaskedLM預測來驗證這個做法是否正確 ## Justify (驗證有效性) ```python from transformers import AutoTokenizer, AlbertForMaskedLM import torch from torch.nn.functional import softmax pretrained = 'voidful/albert_chinese_base' tokenizer = AutoTokenizer.from_pretrained(pretrained) model = AlbertForMaskedLM.from_pretrained(pretrained) inputtext = "今天[MASK]情很好" maskpos = tokenizer.encode(inputtext, add_special_tokens=True).index(103) input_ids = torch.tensor(tokenizer.encode(inputtext, add_special_tokens=True)).unsqueeze(0) # Batch size 1 outputs = model(input_ids, labels=input_ids) loss, prediction_scores = outputs[:2] logit_prob = softmax(prediction_scores[0, maskpos],dim=-1).data.tolist() predicted_index = torch.argmax(prediction_scores[0, maskpos]).item() predicted_token = tokenizer.convert_ids_to_tokens([predicted_index])[0] print(predicted_token, logit_prob[predicted_index]) ``` Result: `感 0.36333346366882324`
Chun/w-en2zh-mtm
[ "pytorch", "mbart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MBartForConditionalGeneration" ], "model_type": "mbart", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2020-03-06T13:21:01Z
--- language: zh pipeline_tag: fill-mask widget: - text: "今天[MASK]情很好" --- # albert_chinese_large This is an albert_chinese_large model from [Google's github](https://github.com/google-research/ALBERT), converted by huggingface's [script](https://github.com/huggingface/transformers/blob/master/src/transformers/convert_albert_original_tf_checkpoint_to_pytorch.py). ## Notice *Support AutoTokenizer* Since sentencepiece is not used in this model, you have to load BertTokenizer instead of AlbertTokenizer. We can verify this with a MaskedLM example. 由於 albert_chinese_base 模型沒有用 sentencepiece 用AlbertTokenizer會載不進詞表,因此需要改用BertTokenizer !!! 我們可以跑MaskedLM預測來驗證這個做法是否正確 ## Justify (驗證有效性) ```python from transformers import AutoTokenizer, AlbertForMaskedLM import torch from torch.nn.functional import softmax pretrained = 'voidful/albert_chinese_large' tokenizer = AutoTokenizer.from_pretrained(pretrained) model = AlbertForMaskedLM.from_pretrained(pretrained) inputtext = "今天[MASK]情很好" maskpos = tokenizer.encode(inputtext, add_special_tokens=True).index(103) input_ids = torch.tensor(tokenizer.encode(inputtext, add_special_tokens=True)).unsqueeze(0) # Batch size 1 outputs = model(input_ids, labels=input_ids) loss, prediction_scores = outputs[:2] logit_prob = softmax(prediction_scores[0, maskpos],dim=-1).data.tolist() predicted_index = torch.argmax(prediction_scores[0, maskpos]).item() predicted_token = tokenizer.convert_ids_to_tokens([predicted_index])[0] print(predicted_token, logit_prob[predicted_index]) ``` Result: `心 0.9422469735145569`
Chun/w-en2zh-otm
[ "pytorch", "mbart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MBartForConditionalGeneration" ], "model_type": "mbart", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- language: zh pipeline_tag: fill-mask widget: - text: "今天[MASK]情很好" --- # albert_chinese_small This is an albert_chinese_small model from the [brightmart/albert_zh project](https://github.com/brightmart/albert_zh) (albert_small_google_zh), converted by huggingface's [script](https://github.com/huggingface/transformers/blob/master/src/transformers/convert_albert_original_tf_checkpoint_to_pytorch.py). ## Notice *Support AutoTokenizer* Since sentencepiece is not used in this model, you have to load BertTokenizer instead of AlbertTokenizer. We can verify this with a MaskedLM example. 由於 albert_chinese_base 模型沒有用 sentencepiece 用AlbertTokenizer會載不進詞表,因此需要改用BertTokenizer !!! 我們可以跑MaskedLM預測來驗證這個做法是否正確 ## Justify (驗證有效性) ```python from transformers import AutoTokenizer, AlbertForMaskedLM import torch from torch.nn.functional import softmax pretrained = 'voidful/albert_chinese_small' tokenizer = AutoTokenizer.from_pretrained(pretrained) model = AlbertForMaskedLM.from_pretrained(pretrained) inputtext = "今天[MASK]情很好" maskpos = tokenizer.encode(inputtext, add_special_tokens=True).index(103) input_ids = torch.tensor(tokenizer.encode(inputtext, add_special_tokens=True)).unsqueeze(0) # Batch size 1 outputs = model(input_ids, labels=input_ids) loss, prediction_scores = outputs[:2] logit_prob = softmax(prediction_scores[0, maskpos],dim=-1).data.tolist() predicted_index = torch.argmax(prediction_scores[0, maskpos]).item() predicted_token = tokenizer.convert_ids_to_tokens([predicted_index])[0] print(predicted_token, logit_prob[predicted_index]) ``` Result: `感 0.6390823125839233`
Chun/w-zh2en-hsk
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- language: zh pipeline_tag: fill-mask widget: - text: "今天[MASK]情很好" --- # albert_chinese_tiny This is an albert_chinese_tiny model from the [brightmart/albert_zh project](https://github.com/brightmart/albert_zh) (albert_tiny_google_zh), converted by huggingface's [script](https://github.com/huggingface/transformers/blob/master/src/transformers/convert_albert_original_tf_checkpoint_to_pytorch.py). ## Notice *Support AutoTokenizer* Since sentencepiece is not used in this model, you have to load BertTokenizer instead of AlbertTokenizer. We can verify this with a MaskedLM example. 由於 albert_chinese_base 模型沒有用 sentencepiece 用AlbertTokenizer會載不進詞表,因此需要改用BertTokenizer !!! 我們可以跑MaskedLM預測來驗證這個做法是否正確 ## Justify (驗證有效性) ```python from transformers import AutoTokenizer, AlbertForMaskedLM import torch from torch.nn.functional import softmax pretrained = 'voidful/albert_chinese_tiny' tokenizer = AutoTokenizer.from_pretrained(pretrained) model = AlbertForMaskedLM.from_pretrained(pretrained) inputtext = "今天[MASK]情很好" maskpos = tokenizer.encode(inputtext, add_special_tokens=True).index(103) input_ids = torch.tensor(tokenizer.encode(inputtext, add_special_tokens=True)).unsqueeze(0) # Batch size 1 outputs = model(input_ids, labels=input_ids) loss, prediction_scores = outputs[:2] logit_prob = softmax(prediction_scores[0, maskpos],dim=-1).data.tolist() predicted_index = torch.argmax(prediction_scores[0, maskpos]).item() predicted_token = tokenizer.convert_ids_to_tokens([predicted_index])[0] print(predicted_token, logit_prob[predicted_index]) ``` Result: `感 0.40312355756759644`
Chun/w-zh2en-mtm
[ "pytorch", "mbart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MBartForConditionalGeneration" ], "model_type": "mbart", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- language: zh pipeline_tag: fill-mask widget: - text: "今天[MASK]情很好" --- # albert_chinese_xlarge This is an albert_chinese_xlarge model from [Google's github](https://github.com/google-research/ALBERT), converted by huggingface's [script](https://github.com/huggingface/transformers/blob/master/src/transformers/convert_albert_original_tf_checkpoint_to_pytorch.py). ## Notice *Support AutoTokenizer* Since sentencepiece is not used in this model, you have to load BertTokenizer instead of AlbertTokenizer. We can verify this with a MaskedLM example. 由於 albert_chinese_base 模型沒有用 sentencepiece 用AlbertTokenizer會載不進詞表,因此需要改用BertTokenizer !!! 我們可以跑MaskedLM預測來驗證這個做法是否正確 ## Justify (驗證有效性) ```python from transformers import AutoTokenizer, AlbertForMaskedLM import torch from torch.nn.functional import softmax pretrained = 'voidful/albert_chinese_xlarge' tokenizer = AutoTokenizer.from_pretrained(pretrained) model = AlbertForMaskedLM.from_pretrained(pretrained) inputtext = "今天[MASK]情很好" maskpos = tokenizer.encode(inputtext, add_special_tokens=True).index(103) input_ids = torch.tensor(tokenizer.encode(inputtext, add_special_tokens=True)).unsqueeze(0) # Batch size 1 outputs = model(input_ids, labels=input_ids) loss, prediction_scores = outputs[:2] logit_prob = softmax(prediction_scores[0, maskpos],dim=-1).data.tolist() predicted_index = torch.argmax(prediction_scores[0, maskpos]).item() predicted_token = tokenizer.convert_ids_to_tokens([predicted_index])[0] print(predicted_token, logit_prob[predicted_index]) ``` Result: `心 0.9942440390586853`
Chun/w-zh2en-mto
[ "pytorch", "mbart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MBartForConditionalGeneration" ], "model_type": "mbart", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- language: zh pipeline_tag: fill-mask widget: - text: "今天[MASK]情很好" --- # albert_chinese_xxlarge This is an albert_chinese_xxlarge model from [Google's github](https://github.com/google-research/ALBERT), converted by huggingface's [script](https://github.com/huggingface/transformers/blob/master/src/transformers/convert_albert_original_tf_checkpoint_to_pytorch.py). ## Notice *Support AutoTokenizer* Since sentencepiece is not used in this model, you have to load BertTokenizer instead of AlbertTokenizer. We can verify this with a MaskedLM example. 由於 albert_chinese_base 模型沒有用 sentencepiece 用AlbertTokenizer會載不進詞表,因此需要改用BertTokenizer !!! 我們可以跑MaskedLM預測來驗證這個做法是否正確 ## Justify (驗證有效性) ```python from transformers import AutoTokenizer, AlbertForMaskedLM import torch from torch.nn.functional import softmax pretrained = 'voidful/albert_chinese_xxlarge' tokenizer = AutoTokenizer.from_pretrained(pretrained) model = AlbertForMaskedLM.from_pretrained(pretrained) inputtext = "今天[MASK]情很好" maskpos = tokenizer.encode(inputtext, add_special_tokens=True).index(103) input_ids = torch.tensor(tokenizer.encode(inputtext, add_special_tokens=True)).unsqueeze(0) # Batch size 1 outputs = model(input_ids, labels=input_ids) loss, prediction_scores = outputs[:2] logit_prob = softmax(prediction_scores[0, maskpos],dim=-1).data.tolist() predicted_index = torch.argmax(prediction_scores[0, maskpos]).item() predicted_token = tokenizer.convert_ids_to_tokens([predicted_index])[0] print(predicted_token, logit_prob[predicted_index]) ``` Result: `心 0.995713472366333`
Chungu424/DATA
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: en datasets: - librispeech tags: - audio - automatic-speech-recognition - speech - asr - hubert license: apache-2.0 metrics: - wer - cer --- # voidful/asr_hubert_cluster_bart_base ## Usage download file ```shell wget https://raw.githubusercontent.com/voidful/hubert-cluster-code/main/km_feat_100_layer_20 wget https://cdn-media.huggingface.co/speech_samples/sample1.flac ``` Hubert kmeans code ```python import joblib import torch from transformers import Wav2Vec2FeatureExtractor, HubertModel import soundfile as sf class HubertCode(object): def __init__(self, hubert_model, km_path, km_layer): self.processor = Wav2Vec2FeatureExtractor.from_pretrained(hubert_model) self.model = HubertModel.from_pretrained(hubert_model) self.km_model = joblib.load(km_path) self.km_layer = km_layer self.C_np = self.km_model.cluster_centers_.transpose() self.Cnorm_np = (self.C_np ** 2).sum(0, keepdims=True) self.C = torch.from_numpy(self.C_np) self.Cnorm = torch.from_numpy(self.Cnorm_np) if torch.cuda.is_available(): self.C = self.C.cuda() self.Cnorm = self.Cnorm.cuda() self.model = self.model.cuda() def __call__(self, filepath, sampling_rate=None): speech, sr = sf.read(filepath) input_values = self.processor(speech, return_tensors="pt", sampling_rate=sr).input_values if torch.cuda.is_available(): input_values = input_values.cuda() hidden_states = self.model(input_values, output_hidden_states=True).hidden_states x = hidden_states[self.km_layer].squeeze() dist = ( x.pow(2).sum(1, keepdim=True) - 2 * torch.matmul(x, self.C) + self.Cnorm ) return dist.argmin(dim=1).cpu().numpy() ``` input ```python hc = HubertCode("facebook/hubert-large-ll60k", './km_feat_100_layer_20', 20) voice_ids = hc('./sample1.flac') ``` bart model ````python from transformers import AutoTokenizer, AutoModelForSeq2SeqLM tokenizer = AutoTokenizer.from_pretrained("voidful/asr_hubert_cluster_bart_base") model = AutoModelForSeq2SeqLM.from_pretrained("voidful/asr_hubert_cluster_bart_base") ```` generate output ```python gen_output = model.generate(input_ids=tokenizer("".join([f":vtok{i}:" for i in voice_ids]),return_tensors='pt').input_ids,max_length=1024) print(tokenizer.decode(gen_output[0], skip_special_tokens=True)) ``` ## Result `going along slushy country roads and speaking to damp audience in drifty school rooms day after day for a fortnight he'll have to put in an appearance at some place of worship on sunday morning and he can come to ask immediately afterwards`
Chungu424/repo
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: en tags: - bart - distractor - generation - seq2seq datasets: - race metrics: - bleu - rouge pipeline_tag: text2text-generation widget: - text: "When you ' re having a holiday , one of the main questions to ask is which hotel or apartment to choose . However , when it comes to France , you have another special choice : treehouses . In France , treehouses are offered to travelers as a new choice in many places . The price may be a little higher , but you do have a chance to _ your childhood memories . Alain Laurens , one of France ' s top treehouse designers , said , ' Most of the people might have the experience of building a den when they were young . And they like that feeling of freedom when they are children . ' Its fairy - tale style gives travelers a special feeling . It seems as if they are living as a forest king and enjoying the fresh air in the morning . Another kind of treehouse is the ' star cube ' . It gives travelers the chance of looking at the stars shining in the sky when they are going to sleep . Each ' star cube ' not only offers all the comfortable things that a hotel provides for travelers , but also gives them a chance to look for stars by using a telescope . The glass roof allows you to look at the stars from your bed . </s> The passage mainly tells us </s> treehouses in france." --- # bart-distractor-generation-both ## Model description This model is a sequence-to-sequence distractor generator which takes an answer, question and context as an input, and generates a distractor as an output. It is based on a pretrained `bart-base` model. This model was trained with Parallel MLM & Answer Negative Regularization; refer to the [Paper](https://www.aclweb.org/anthology/2020.findings-emnlp.393/). For details, please see https://github.com/voidful/BDG. ## Intended uses & limitations The model is trained to generate examination-style multiple-choice distractors. The model performs best with full sentence answers. #### How to use The model takes the concatenated context, question and answer as an input sequence, and will generate a full distractor sentence as an output sequence. The max sequence length is 1024 tokens. Inputs should be organised into the following format: ``` context </s> question </s> answer ``` The input sequence can then be encoded and passed as the `input_ids` argument in the model's `generate()` method, as in the sketch below. For details, please see https://github.com/voidful/BDG. #### Limitations and bias The model is limited to generating distractors in the same style as those found in [RACE](https://www.aclweb.org/anthology/D17-1082/). The generated distractors can potentially be leading or reflect biases that are present in the context. If the context is too short or completely absent, or if the context, question and answer do not match, the generated distractor is likely to be incoherent.
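A minimal usage sketch of the format described above follows. The repository id is assumed from the card title, and the example context, question and answer strings are illustrative.

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Repo id assumed from the card title; adjust if the hub path differs.
model_name = "voidful/bart-distractor-generation-both"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# Input format from the card: context </s> question </s> answer
context = "In France, treehouses are offered to travelers as a new kind of accommodation."
question = "What does the passage mainly tell us?"
answer = "treehouses in France"
text = f"{context} </s> {question} </s> {answer}"

input_ids = tokenizer(text, return_tensors="pt", truncation=True, max_length=1024).input_ids
outputs = model.generate(input_ids, max_length=64, num_beams=4)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```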
Chungu424/repodata
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: en tags: - bart - distractor - generation - seq2seq datasets: - race metrics: - bleu - rouge pipeline_tag: text2text-generation widget: - text: "When you ' re having a holiday , one of the main questions to ask is which hotel or apartment to choose . However , when it comes to France , you have another special choice : treehouses . In France , treehouses are offered to travelers as a new choice in many places . The price may be a little higher , but you do have a chance to _ your childhood memories . Alain Laurens , one of France ' s top treehouse designers , said , ' Most of the people might have the experience of building a den when they were young . And they like that feeling of freedom when they are children . ' Its fairy - tale style gives travelers a special feeling . It seems as if they are living as a forest king and enjoying the fresh air in the morning . Another kind of treehouse is the ' star cube ' . It gives travelers the chance of looking at the stars shining in the sky when they are going to sleep . Each ' star cube ' not only offers all the comfortable things that a hotel provides for travelers , but also gives them a chance to look for stars by using a telescope . The glass roof allows you to look at the stars from your bed . </s> The passage mainly tells us </s> treehouses in france." --- # bart-distractor-generation-pm ## Model description This model is a sequence-to-sequence distractor generator which takes an answer, question and context as an input, and generates a distractor as an output. It is based on a pretrained `bart-base` model. This model was trained with Parallel MLM; refer to the [Paper](https://www.aclweb.org/anthology/2020.findings-emnlp.393/). For details, please see https://github.com/voidful/BDG. ## Intended uses & limitations The model is trained to generate examination-style multiple-choice distractors. The model performs best with full sentence answers. #### How to use The model takes the concatenated context, question and answer as an input sequence, and will generate a full distractor sentence as an output sequence. The max sequence length is 1024 tokens. Inputs should be organised into the following format: ``` context </s> question </s> answer ``` The input sequence can then be encoded and passed as the `input_ids` argument in the model's `generate()` method. #### Limitations and bias The model is limited to generating distractors in the same style as those found in [RACE](https://www.aclweb.org/anthology/D17-1082/). The generated distractors can potentially be leading or reflect biases that are present in the context. If the context is too short or completely absent, or if the context, question and answer do not match, the generated distractor is likely to be incoherent.
Chuu/Chumar
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: en tags: - bart - distractor - generation - seq2seq datasets: - race metrics: - bleu - rouge pipeline_tag: text2text-generation widget: - text: "When you ' re having a holiday , one of the main questions to ask is which hotel or apartment to choose . However , when it comes to France , you have another special choice : treehouses . In France , treehouses are offered to travelers as a new choice in many places . The price may be a little higher , but you do have a chance to _ your childhood memories . Alain Laurens , one of France ' s top treehouse designers , said , ' Most of the people might have the experience of building a den when they were young . And they like that feeling of freedom when they are children . ' Its fairy - tale style gives travelers a special feeling . It seems as if they are living as a forest king and enjoying the fresh air in the morning . Another kind of treehouse is the ' star cube ' . It gives travelers the chance of looking at the stars shining in the sky when they are going to sleep . Each ' star cube ' not only offers all the comfortable things that a hotel provides for travelers , but also gives them a chance to look for stars by using a telescope . The glass roof allows you to look at the stars from your bed . </s> The passage mainly tells us </s> treehouses in france." --- # bart-distractor-generation ## Model description This model is a sequence-to-sequence distractor generator which takes an answer, question and context as an input, and generates a distractor as an output. It is based on a pretrained `bart-base` model. For details, please see https://github.com/voidful/BDG. ## Intended uses & limitations The model is trained to generate examination-style multiple-choice distractors. The model performs best with full sentence answers. #### How to use The model takes the concatenated context, question and answer as an input sequence, and will generate a full distractor sentence as an output sequence. The max sequence length is 1024 tokens. Inputs should be organised into the following format: ``` context </s> question </s> answer ``` The input sequence can then be encoded and passed as the `input_ids` argument in the model's `generate()` method, as in the sketch below. For details, please see https://github.com/voidful/BDG. #### Limitations and bias The model is limited to generating distractors in the same style as those found in [RACE](https://www.aclweb.org/anthology/D17-1082/). The generated distractors can potentially be leading or reflect biases that are present in the context. If the context is too short or completely absent, or if the context, question and answer do not match, the generated distractor is likely to be incoherent.
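Because a multiple-choice item usually needs several distractors, beam search with multiple return sequences is one way to get a few candidates per question. The sketch below is illustrative; the repository id is assumed from the card title and the generation settings are not taken from the card.

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Repo id assumed from the card title; adjust if the hub path differs.
model_name = "voidful/bart-distractor-generation"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# Input format from the card: context </s> question </s> answer
text = (
    "In France , treehouses are offered to travelers as a new choice in many places . "
    "</s> The passage mainly tells us </s> treehouses in france"
)
input_ids = tokenizer(text, return_tensors="pt", truncation=True, max_length=1024).input_ids

# Return a few candidate distractors for the same question.
outputs = model.generate(
    input_ids,
    max_length=64,
    num_beams=5,
    num_return_sequences=3,
    early_stopping=True,
)
for seq in outputs:
    print(tokenizer.decode(seq, skip_special_tokens=True))
```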
Ci/Pai
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: en tags: - bart - question - generation - seq2seq datasets: - eqg-race metrics: - bleu - rouge pipeline_tag: text2text-generation widget: - text: "When you ' re having a holiday , one of the main questions to ask is which hotel or apartment to choose . However , when it comes to France , you have another special choice : treehouses . In France , treehouses are offered to travelers as a new choice in many places . The price may be a little higher , but you do have a chance to _ your childhood memories . Alain Laurens , one of France ' s top treehouse designers , said , ' Most of the people might have the experience of building a den when they were young . And they like that feeling of freedom when they are children . ' Its fairy - tale style gives travelers a special feeling . It seems as if they are living as a forest king and enjoying the fresh air in the morning . Another kind of treehouse is the ' star cube ' . It gives travelers the chance of looking at the stars shining in the sky when they are going to sleep . Each ' star cube ' not only offers all the comfortable things that a hotel provides for travelers , but also gives them a chance to look for stars by using a telescope . The glass roof allows you to look at the stars from your bed . " --- # voidful/bart-eqg-question-generator ## Model description This model is a sequence-to-sequence question generator which takes only the context as an input and generates a question as an output. It is based on a pretrained `bart-base` model, and trained on the [EQG-RACE](https://github.com/jemmryx/EQG-RACE) corpus. ## Intended uses & limitations The model is trained to generate examination-style multiple-choice questions. #### How to use The model takes context as an input sequence, and will generate a question as an output sequence. The max sequence length is 1024 tokens. Inputs should be organised into the following format: ``` context ``` The input sequence can then be encoded and passed as the `input_ids` argument in the model's `generate()` method, as in the sketch below.
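The card names the model explicitly, so a minimal usage sketch follows; the example passage is illustrative and the generation settings (beam search, length limit) are assumptions rather than values from the card.

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_name = "voidful/bart-eqg-question-generator"  # name taken from the card
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# The input is the context alone.
context = "In France, treehouses are offered to travelers as a new choice in many places."
input_ids = tokenizer(context, return_tensors="pt", truncation=True, max_length=1024).input_ids
outputs = model.generate(input_ids, max_length=64, num_beams=4)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```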
Ciruzzo/DialoGPT-medium-harrypotter
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: en tags: - bart - question - generation - seq2seq datasets: - unifiedQA metrics: - bleu - rouge pipeline_tag: text2text-generation widget: - text: "Harry Potter is a series of seven fantasy novels written by British author J. K. Rowling. The novels chronicle the lives of a young wizard, Harry Potter, and his friends Hermione Granger and Ron Weasley, all of whom are students at Hogwarts School of Witchcraft and Wizardry. The main story arc concerns Harry's struggle against Lord Voldemort, a dark wizard who intends to become immortal, overthrow the wizard governing body known as the Ministry of Magic and subjugate all wizards and Muggles(non-magical people)." --- # context-only-question-generator ## Model description This model is a sequence-to-sequence question generator which takes context as an input, and generates a question as an output. It is based on a pretrained `bart-base` model. #### How to use Inputs should be organised into the following format: ``` context ``` The input sequence can then be encoded and passed as the `input_ids` argument in the model's `generate()` method.
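A minimal sketch using the `text2text-generation` pipeline is shown below. The card does not state the repository id, so the one used here is an assumption based on the card title, and the passage is trimmed from the card's widget example.

```python
from transformers import pipeline

# Repo id assumed from the card title; adjust to the actual hub path.
generator = pipeline("text2text-generation", model="voidful/context-only-question-generator")

# Context-only input, as described in the card.
context = (
    "Harry Potter is a series of seven fantasy novels written by British author "
    "J. K. Rowling. The novels chronicle the lives of a young wizard, Harry Potter, "
    "and his friends Hermione Granger and Ron Weasley."
)
print(generator(context, max_length=64)[0]["generated_text"])
```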
Ciruzzo/DialoGPT-small-harrypotter
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- language: multilingual datasets: - NQ - Trivia - SQuAD - MLQA - DRCD --- # dpr-ctx_encoder-bert-base-multilingual ## Description Multilingual DPR model based on bert-base-multilingual-cased. [DPR model](https://arxiv.org/abs/2004.04906) [DPR repo](https://github.com/facebookresearch/DPR) ## Data 1. [NQ](https://github.com/facebookresearch/DPR/blob/master/data/download_data.py) 2. [Trivia](https://github.com/facebookresearch/DPR/blob/master/data/download_data.py) 3. [SQuAD](https://github.com/facebookresearch/DPR/blob/master/data/download_data.py) 4. [DRCD*](https://github.com/DRCKnowledgeTeam/DRCD) 5. [MLQA*](https://github.com/facebookresearch/MLQA) `question pairs for train`: 644,217 `question pairs for dev`: 73,710 *DRCD and MLQA are converted using the script from haystack [squad_to_dpr.py](https://github.com/deepset-ai/haystack/blob/master/haystack/retriever/squad_to_dpr.py) ## Training Script I used the script from [haystack](https://colab.research.google.com/github/deepset-ai/haystack/blob/master/tutorials/Tutorial9_DPR_training.ipynb) ## Usage ```python from transformers import DPRContextEncoder, DPRContextEncoderTokenizer tokenizer = DPRContextEncoderTokenizer.from_pretrained('voidful/dpr-ctx_encoder-bert-base-multilingual') model = DPRContextEncoder.from_pretrained('voidful/dpr-ctx_encoder-bert-base-multilingual') input_ids = tokenizer("Hello, is my dog cute ?", return_tensors='pt')["input_ids"] embeddings = model(input_ids).pooler_output ``` Follow the tutorial from `haystack`: [Better Retrievers via "Dense Passage Retrieval"](https://colab.research.google.com/github/deepset-ai/haystack/blob/master/tutorials/Tutorial6_Better_Retrieval_via_DPR.ipynb) ``` from haystack.retriever.dense import DensePassageRetriever retriever = DensePassageRetriever(document_store=document_store, query_embedding_model="voidful/dpr-question_encoder-bert-base-multilingual", passage_embedding_model="voidful/dpr-ctx_encoder-bert-base-multilingual", max_seq_len_query=64, max_seq_len_passage=256, batch_size=16, use_gpu=True, embed_title=True, use_fast_tokenizers=True) ```
Ciruzzo/DialoGPT-small-hattypotter
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: multilingual datasets: - NQ - Trivia - SQuAD - MLQA - DRCD --- # dpr-question_encoder-bert-base-multilingual ## Description Multilingual DPR model based on bert-base-multilingual-cased. [DPR model](https://arxiv.org/abs/2004.04906) [DPR repo](https://github.com/facebookresearch/DPR) ## Data 1. [NQ](https://github.com/facebookresearch/DPR/blob/master/data/download_data.py) 2. [Trivia](https://github.com/facebookresearch/DPR/blob/master/data/download_data.py) 3. [SQuAD](https://github.com/facebookresearch/DPR/blob/master/data/download_data.py) 4. [DRCD*](https://github.com/DRCKnowledgeTeam/DRCD) 5. [MLQA*](https://github.com/facebookresearch/MLQA) `question pairs for train`: 644,217 `question pairs for dev`: 73,710 *DRCD and MLQA are converted using the script from haystack [squad_to_dpr.py](https://github.com/deepset-ai/haystack/blob/master/haystack/retriever/squad_to_dpr.py) ## Training Script I used the script from [haystack](https://colab.research.google.com/github/deepset-ai/haystack/blob/master/tutorials/Tutorial9_DPR_training.ipynb) ## Usage ```python from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizer tokenizer = DPRQuestionEncoderTokenizer.from_pretrained('voidful/dpr-question_encoder-bert-base-multilingual') model = DPRQuestionEncoder.from_pretrained('voidful/dpr-question_encoder-bert-base-multilingual') input_ids = tokenizer("Hello, is my dog cute ?", return_tensors='pt')["input_ids"] embeddings = model(input_ids).pooler_output ``` Follow the tutorial from `haystack`: [Better Retrievers via "Dense Passage Retrieval"](https://colab.research.google.com/github/deepset-ai/haystack/blob/master/tutorials/Tutorial6_Better_Retrieval_via_DPR.ipynb) ``` from haystack.retriever.dense import DensePassageRetriever retriever = DensePassageRetriever(document_store=document_store, query_embedding_model="voidful/dpr-question_encoder-bert-base-multilingual", passage_embedding_model="voidful/dpr-ctx_encoder-bert-base-multilingual", max_seq_len_query=64, max_seq_len_passage=256, batch_size=16, use_gpu=True, embed_title=True, use_fast_tokenizers=True) ```
CleveGreen/FieldClassifier_v2_gpt
[ "pytorch", "gpt2", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "GPT2ForSequenceClassification" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
26
null
--- language: en datasets: - librispeech tags: - audio - automatic-speech-recognition - speech - asr - hubert license: apache-2.0 metrics: - wer - cer --- # voidful/tts_hubert_cluster_bart_base ## Usage ````python from transformers import AutoTokenizer, AutoModelForSeq2SeqLM tokenizer = AutoTokenizer.from_pretrained("voidful/tts_hubert_cluster_bart_base") model = AutoModelForSeq2SeqLM.from_pretrained("voidful/tts_hubert_cluster_bart_base") ```` generate output ```python gen_output = model.generate(input_ids=tokenizer("going along slushy country roads and speaking to damp audience in drifty school rooms day after day for a fortnight he'll have to put in an appearance at some place of worship on sunday morning and he can come to ask immediately afterwards",return_tensors='pt').input_ids, max_length=1024) print(tokenizer.decode(gen_output[0], skip_special_tokens=True)) ``` ## Result `:vtok402::vtok329::vtok329::vtok75::vtok75::vtok75::vtok44::vtok150::vtok150::vtok222::vtok280::vtok280::vtok138::vtok409::vtok409::vtok409::vtok46::vtok441:`
CleveGreen/JobClassifier_v2
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
37
null
--- language: en tags: - bart - question - generation - seq2seq datasets: - unifiedQA metrics: - bleu - rouge pipeline_tag: text2text-generation widget: - text: "treehouses in france. \n When you ' re having a holiday , one of the main questions to ask is which hotel or apartment to choose . However , when it comes to France , you have another special choice : treehouses . In France , treehouses are offered to travelers as a new choice in many places . The price may be a little higher , but you do have a chance to _ your childhood memories . Alain Laurens , one of France ' s top treehouse designers , said , ' Most of the people might have the experience of building a den when they were young . And they like that feeling of freedom when they are children . ' Its fairy - tale style gives travelers a special feeling . It seems as if they are living as a forest king and enjoying the fresh air in the morning . Another kind of treehouse is the ' star cube ' . It gives travelers the chance of looking at the stars shining in the sky when they are going to sleep . Each ' star cube ' not only offers all the comfortable things that a hotel provides for travelers , but also gives them a chance to look for stars by using a telescope . The glass roof allows you to look at the stars from your bed ." --- # unifiedqg-bart-base ## Model description This model is a sequence-to-sequence question generator which takes an answer and context as an input, and generates a question as an output. It is based on a pretrained `bart-base` model. #### How to use The model takes the concatenated answer and context as an input sequence, and will generate a full question sentence as an output sequence. The max sequence length is 1024 tokens. Inputs should be organised into the following format: ``` answer \n context ``` The input sequence can then be encoded and passed as the `input_ids` argument in the model's `generate()` method, as in the sketch below.
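A minimal sketch of the `answer \n context` format described above follows. The repository id is assumed from the card title, and the answer/context strings are condensed from the card's widget example.

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Repo id assumed from the card title; adjust if the hub path differs.
model_name = "voidful/unifiedqg-bart-base"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# Input format from the card: answer \n context
answer = "treehouses in france"
context = "In France, treehouses are offered to travelers as a new choice in many places."
text = f"{answer} \n {context}"

input_ids = tokenizer(text, return_tensors="pt", truncation=True, max_length=1024).input_ids
outputs = model.generate(input_ids, max_length=64, num_beams=4)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```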
CleveGreen/JobClassifier_v2_gpt
[ "pytorch", "gpt2", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "GPT2ForSequenceClassification" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
--- language: zh-HK datasets: - common_voice tags: - audio - automatic-speech-recognition - hf-asr-leaderboard - robust-speech-event - speech - xlsr-fine-tuning-week license: apache-2.0 model-index: - name: XLSR Wav2Vec2 Cantonese (Hong Kong) by Voidful results: - task: name: Speech Recognition type: automatic-speech-recognition dataset: name: Common Voice zh-HK type: common_voice args: zh-HK metrics: - name: Test CER type: cer value: 16.41 --- # Wav2Vec2-Large-XLSR-53-hk Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Cantonese using the [Common Voice](https://huggingface.co/datasets/common_voice). When using this model, make sure that your speech input is sampled at 16kHz. ## Usage [Colab trial](https://colab.research.google.com/drive/1nBRLf4Pwiply_y5rXWoaIB8LxX41tfEI?usp=sharing) ``` import torchaudio from datasets import load_dataset, load_metric from transformers import ( Wav2Vec2ForCTC, Wav2Vec2Processor, ) import torch import re import sys model_name = "voidful/wav2vec2-large-xlsr-53-hk" device = "cuda" processor_name = "voidful/wav2vec2-large-xlsr-53-hk" chars_to_ignore_regex = r"[¥•"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、 、〃〈〉《》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏﹑﹔·'℃°•·.﹑︰〈〉─《﹖﹣﹂﹁﹔!?。。"#$%&'()*+,﹐-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏..!\\"#$%&()*+,\\-.\\:;<=>?@\\[\\]\\\\\\/^_`{|}~]" model = Wav2Vec2ForCTC.from_pretrained(model_name).to(device) processor = Wav2Vec2Processor.from_pretrained(processor_name) resampler = torchaudio.transforms.Resample(orig_freq=48_000, new_freq=16_000) def load_file_to_data(file): batch = {} speech, _ = torchaudio.load(file) batch["speech"] = resampler.forward(speech.squeeze(0)).numpy() batch["sampling_rate"] = resampler.new_freq return batch def predict(data): features = processor(data["speech"], sampling_rate=data["sampling_rate"], padding=True, return_tensors="pt") input_values = features.input_values.to(device) attention_mask = features.attention_mask.to(device) with torch.no_grad(): logits = model(input_values, attention_mask=attention_mask).logits pred_ids = torch.argmax(logits, dim=-1) return processor.batch_decode(pred_ids) ``` Predict ```python predict(load_file_to_data('voice file path')) ``` ## Evaluation The model can be evaluated as follows on the Cantonese (Hong Kong) test data of Common Voice. 
CER calculation refer to https://huggingface.co/ctl/wav2vec2-large-xlsr-cantonese ```python !mkdir cer !wget -O cer/cer.py https://huggingface.co/ctl/wav2vec2-large-xlsr-cantonese/raw/main/cer.py !pip install jiwer import torchaudio from datasets import load_dataset, load_metric from transformers import ( Wav2Vec2ForCTC, Wav2Vec2Processor, ) import torch import re import sys cer = load_metric("./cer") model_name = "voidful/wav2vec2-large-xlsr-53-hk" device = "cuda" processor_name = "voidful/wav2vec2-large-xlsr-53-hk" chars_to_ignore_regex = r"[¥•"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、 、〃〈〉《》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏﹑﹔·'℃°•·.﹑︰〈〉─《﹖﹣﹂﹁﹔!?。。"#$%&'()*+,﹐-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏..!\\"#$%&()*+,\\-.\\:;<=>?@\\[\\]\\\\\\/^_`{|}~]" model = Wav2Vec2ForCTC.from_pretrained(model_name).to(device) processor = Wav2Vec2Processor.from_pretrained(processor_name) ds = load_dataset("common_voice", 'zh-HK', data_dir="./cv-corpus-6.1-2020-12-11", split="test") resampler = torchaudio.transforms.Resample(orig_freq=48_000, new_freq=16_000) def map_to_array(batch): speech, _ = torchaudio.load(batch["path"]) batch["speech"] = resampler.forward(speech.squeeze(0)).numpy() batch["sampling_rate"] = resampler.new_freq batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower().replace("’", "'") return batch ds = ds.map(map_to_array) def map_to_pred(batch): features = processor(batch["speech"], sampling_rate=batch["sampling_rate"][0], padding=True, return_tensors="pt") input_values = features.input_values.to(device) attention_mask = features.attention_mask.to(device) with torch.no_grad(): logits = model(input_values, attention_mask=attention_mask).logits pred_ids = torch.argmax(logits, dim=-1) batch["predicted"] = processor.batch_decode(pred_ids) batch["target"] = batch["sentence"] return batch result = ds.map(map_to_pred, batched=True, batch_size=16, remove_columns=list(ds.features.keys())) print("CER: {:2f}".format(100 * cer.compute(predictions=result["predicted"], references=result["target"]))) ``` `CER 16.41`
Clint/clinton
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: zh-TW datasets: - common_voice tags: - audio - automatic-speech-recognition - hf-asr-leaderboard - robust-speech-event - speech - xlsr-fine-tuning-week license: apache-2.0 model-index: - name: XLSR Wav2Vec2 Taiwanese Mandarin(zh-tw) by Voidful results: - task: name: Speech Recognition type: automatic-speech-recognition dataset: name: Common Voice zh-TW type: common_voice args: zh-TW metrics: - name: Test CER type: cer value: 18.36 --- # Wav2Vec2-Large-XLSR-53-tw-gpt Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on zh-tw using the [Common Voice](https://huggingface.co/datasets/common_voice). When using this model, make sure that your speech input is sampled at 16kHz. ## Usage [Colab trial](https://colab.research.google.com/drive/1e_z5jQHYbO2YKEaUgzb1ww1WwiAyydAj?usp=sharing) ``` import torchaudio from datasets import load_dataset, load_metric from transformers import ( Wav2Vec2ForCTC, Wav2Vec2Processor, AutoTokenizer, AutoModelWithLMHead ) import torch import re import sys model_name = "voidful/wav2vec2-large-xlsr-53-tw-gpt" device = "cuda" processor_name = "voidful/wav2vec2-large-xlsr-53-tw-gpt" chars_to_ignore_regex = r"[¥•"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、 、〃〈〉《》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏﹑﹔·'℃°•·.﹑︰〈〉─《﹖﹣﹂﹁﹔!?。。"#$%&'()*+,﹐-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏..!\"#$%&()*+,\-.\:;<=>?@\[\]\\\/^_`{|}~]" model = Wav2Vec2ForCTC.from_pretrained(model_name).to(device) processor = Wav2Vec2Processor.from_pretrained(processor_name) tokenizer = AutoTokenizer.from_pretrained("ckiplab/gpt2-base-chinese") gpt_model = AutoModelWithLMHead.from_pretrained("ckiplab/gpt2-base-chinese").to(device) resampler = torchaudio.transforms.Resample(orig_freq=48_000, new_freq=16_000) def load_file_to_data(file): batch = {} speech, _ = torchaudio.load(file) batch["speech"] = resampler.forward(speech.squeeze(0)).numpy() batch["sampling_rate"] = resampler.new_freq return batch def predict(data): features = processor(data["speech"], sampling_rate=data["sampling_rate"], padding=True, return_tensors="pt") input_values = features.input_values.to(device) attention_mask = features.attention_mask.to(device) with torch.no_grad(): logits = model(input_values, attention_mask=attention_mask).logits decoded_results = [] for logit in logits: pred_ids = torch.argmax(logit, dim=-1) mask = pred_ids.ge(1).unsqueeze(-1).expand(logit.size()) vocab_size = logit.size()[-1] voice_prob = torch.nn.functional.softmax((torch.masked_select(logit, mask).view(-1,vocab_size)),dim=-1) gpt_input = torch.cat((torch.tensor([tokenizer.cls_token_id]).to(device),pred_ids[pred_ids>0]), 0) gpt_prob = torch.nn.functional.softmax(gpt_model(gpt_input).logits, dim=-1)[:voice_prob.size()[0],:] comb_pred_ids = torch.argmax(gpt_prob*voice_prob, dim=-1) decoded_results.append(processor.decode(comb_pred_ids)) return decoded_results ``` Predict ```python predict(load_file_to_data('voice file path')) ``` ## Evaluation The model can be evaluated as follows on the zh-tw test data of Common Voice. 
CER calculation refer to https://huggingface.co/ctl/wav2vec2-large-xlsr-cantonese env setup: ``` !pip install editdistance !pip install torchaudio !pip install datasets transformers ``` ## Evaluation without LM: ```python import torchaudio from datasets import load_dataset, load_metric from transformers import ( Wav2Vec2ForCTC, Wav2Vec2Processor, ) import torch import re import sys from transformers import AutoTokenizer, AutoModelWithLMHead from datasets import Audio from math import log model_name = "voidful/wav2vec2-large-xlsr-53-tw-gpt" device = "cuda" processor_name = "voidful/wav2vec2-large-xlsr-53-tw-gpt" chars_to_ignore_regex = r"[¥•"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、 、〃〈〉《》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏﹑﹔·'℃°•·.﹑︰〈〉─《﹖﹣﹂﹁﹔!?。。"#$%&'()*+,﹐-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏..!\"#$%&()*+,\-.\:;<=>?@\[\]\\\/^_`{|}~]" tokenizer = AutoTokenizer.from_pretrained("ckiplab/gpt2-base-chinese") lm_model = AutoModelWithLMHead.from_pretrained("ckiplab/gpt2-base-chinese").to(device) model = Wav2Vec2ForCTC.from_pretrained(model_name).to(device) processor = Wav2Vec2Processor.from_pretrained(processor_name) ds = load_dataset("common_voice", 'zh-TW', split="test") ds = ds.cast_column("audio", Audio(sampling_rate=16_000)) def map_to_array(batch): audio = batch["audio"] batch["speech"] = processor(audio["array"], sampling_rate=audio["sampling_rate"]).input_values[0] batch["sampling_rate"] = audio["sampling_rate"] batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower().replace("’", "'") return batch ds = ds.map(map_to_array) def map_to_pred(batch): features = processor(batch["speech"], sampling_rate=batch["sampling_rate"][0], padding=True, return_tensors="pt") input_values = features.input_values.to(device) attention_mask = features.attention_mask.to(device) with torch.no_grad(): logits = model(input_values, attention_mask=attention_mask).logits pred_ids = torch.argmax(logits, dim=-1) batch["predicted"] = processor.batch_decode(pred_ids) batch["target"] = batch["sentence"] return batch result = ds.map(map_to_pred, batched=True, batch_size=3, remove_columns=list(ds.features.keys())) def cer_cal(groundtruth, hypothesis): err = 0 tot = 0 for p, t in zip(hypothesis, groundtruth): err += float(ed.eval(p.lower(), t.lower())) tot += len(t) return err / tot print("CER: {:2f}".format(100 * cer_cal(result["target"],result["predicted"]))) ``` `CER: 28.70`. 
`TIME: 04:08 min` ## Evaluation with GPT: ```python import torchaudio from datasets import load_dataset, load_metric from transformers import ( Wav2Vec2ForCTC, Wav2Vec2Processor, ) import torch import re import sys from transformers import AutoTokenizer, AutoModelWithLMHead from datasets import Audio from math import log model_name = "voidful/wav2vec2-large-xlsr-53-tw-gpt" device = "cuda" processor_name = "voidful/wav2vec2-large-xlsr-53-tw-gpt" chars_to_ignore_regex = r"[¥•"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、 、〃〈〉《》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏﹑﹔·'℃°•·.﹑︰〈〉─《﹖﹣﹂﹁﹔!?。。"#$%&'()*+,﹐-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏..!\"#$%&()*+,\-.\:;<=>?@\[\]\\\/^_`{|}~]" tokenizer = AutoTokenizer.from_pretrained("ckiplab/gpt2-base-chinese") lm_model = AutoModelWithLMHead.from_pretrained("ckiplab/gpt2-base-chinese").to(device) model = Wav2Vec2ForCTC.from_pretrained(model_name).to(device) processor = Wav2Vec2Processor.from_pretrained(processor_name) ds = load_dataset("common_voice", 'zh-TW', split="test") ds = ds.cast_column("audio", Audio(sampling_rate=16_000)) def map_to_array(batch): audio = batch["audio"] batch["speech"] = processor(audio["array"], sampling_rate=audio["sampling_rate"]).input_values[0] batch["sampling_rate"] = audio["sampling_rate"] batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower().replace("’", "'") return batch ds = ds.map(map_to_array) def map_to_pred(batch): features = processor(batch["speech"], sampling_rate=batch["sampling_rate"][0], padding=True, return_tensors="pt") input_values = features.input_values.to(device) attention_mask = features.attention_mask.to(device) with torch.no_grad(): logits = model(input_values, attention_mask=attention_mask).logits decoded_results = [] for logit in logits: pred_ids = torch.argmax(logit, dim=-1) mask = pred_ids.ge(1).unsqueeze(-1).expand(logit.size()) vocab_size = logit.size()[-1] voice_prob = torch.nn.functional.softmax((torch.masked_select(logit, mask).view(-1,vocab_size)),dim=-1) lm_input = torch.cat((torch.tensor([tokenizer.cls_token_id]).to(device),pred_ids[pred_ids>0]), 0) lm_prob = torch.nn.functional.softmax(lm_model(lm_input).logits, dim=-1)[:voice_prob.size()[0],:] comb_pred_ids = torch.argmax(lm_prob*voice_prob, dim=-1) decoded_results.append(processor.decode(comb_pred_ids)) batch["predicted"] = decoded_results batch["target"] = batch["sentence"] return batch result = ds.map(map_to_pred, batched=True, batch_size=3, remove_columns=list(ds.features.keys())) def cer_cal(groundtruth, hypothesis): err = 0 tot = 0 for p, t in zip(hypothesis, groundtruth): err += float(ed.eval(p.lower(), t.lower())) tot += len(t) return err / tot print("CER: {:2f}".format(100 * cer_cal(result["target"],result["predicted"]))) ``` `CER 25.70`. 
`TIME: 06:04 min` ## Evaluation with GPT + beam search: ```python import torchaudio from datasets import load_dataset, load_metric from transformers import ( Wav2Vec2ForCTC, Wav2Vec2Processor, ) import torch import re import sys from transformers import AutoTokenizer, AutoModelWithLMHead from datasets import Audio from math import log model_name = "voidful/wav2vec2-large-xlsr-53-tw-gpt" device = "cuda" processor_name = "voidful/wav2vec2-large-xlsr-53-tw-gpt" chars_to_ignore_regex = r"[¥•"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、 、〃〈〉《》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏﹑﹔·'℃°•·.﹑︰〈〉─《﹖﹣﹂﹁﹔!?。。"#$%&'()*+,﹐-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏..!\"#$%&()*+,\-.\:;<=>?@\[\]\\\/^_`{|}~]" tokenizer = AutoTokenizer.from_pretrained("ckiplab/gpt2-base-chinese") lm_model = AutoModelWithLMHead.from_pretrained("ckiplab/gpt2-base-chinese").to(device) model = Wav2Vec2ForCTC.from_pretrained(model_name).to(device) processor = Wav2Vec2Processor.from_pretrained(processor_name) ds = load_dataset("common_voice", 'zh-TW', split="test") ds = ds.cast_column("audio", Audio(sampling_rate=16_000)) def map_to_array(batch): audio = batch["audio"] batch["speech"] = processor(audio["array"], sampling_rate=audio["sampling_rate"]).input_values[0] batch["sampling_rate"] = audio["sampling_rate"] batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower().replace("’", "'") return batch ds = ds.map(map_to_array) def map_to_pred(batch): features = processor(batch["speech"], sampling_rate=batch["sampling_rate"][0], padding=True, return_tensors="pt") input_values = features.input_values.to(device) attention_mask = features.attention_mask.to(device) with torch.no_grad(): logits = model(input_values, attention_mask=attention_mask).logits decoded_results = [] for logit in logits: sequences = [[[], 1.0]] pred_ids = torch.argmax(logit, dim=-1) mask = pred_ids.ge(1).unsqueeze(-1).expand(logit.size()) vocab_size = logit.size()[-1] voice_prob = torch.nn.functional.softmax((torch.masked_select(logit, mask).view(-1,vocab_size)),dim=-1) while True: all_candidates = list() exceed = False for seq in sequences: tokens, score = seq gpt_input = torch.tensor([tokenizer.cls_token_id]+tokens).to(device) gpt_prob = torch.nn.functional.softmax(lm_model(gpt_input).logits, dim=-1)[:len(gpt_input),:] if len(gpt_input) >= len(voice_prob): exceed = True comb_pred_ids = gpt_prob*voice_prob[:len(gpt_input)] v,i = torch.topk(comb_pred_ids,50,dim=-1) for tok_id,tok_prob in zip(i.tolist()[-1],v.tolist()[-1]): candidate = [tokens + [tok_id], score + -log(tok_prob)] all_candidates.append(candidate) ordered = sorted(all_candidates, key=lambda tup: tup[1]) sequences = ordered[:10] if exceed: break decoded_results.append(processor.decode(sequences[0][0])) batch["predicted"] = decoded_results batch["target"] = batch["sentence"] return batch result = ds.map(map_to_pred, batched=True, batch_size=3, remove_columns=list(ds.features.keys())) def cer_cal(groundtruth, hypothesis): err = 0 tot = 0 for p, t in zip(hypothesis, groundtruth): err += float(ed.eval(p.lower(), t.lower())) tot += len(t) return err / tot print("CER: {:2f}".format(100 * cer_cal(result["target"],result["predicted"]))) ``` `CER 18.36`. 
## Evaluation with BERT: ```python import torchaudio from datasets import load_dataset, load_metric from transformers import ( Wav2Vec2ForCTC, Wav2Vec2Processor, ) import torch import re import sys from transformers import AutoTokenizer, AutoModelForMaskedLM model_name = "voidful/wav2vec2-large-xlsr-53-tw-gpt" device = "cuda" processor_name = "voidful/wav2vec2-large-xlsr-53-tw-gpt" chars_to_ignore_regex = r"[¥•"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、 、〃〈〉《》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏﹑﹔·'℃°•·.﹑︰〈〉─《﹖﹣﹂﹁﹔!?。。"#$%&'()*+,﹐-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏..!\"#$%&()*+,\-.\:;<=>?@\[\]\\\/^_`{|}~]" tokenizer = AutoTokenizer.from_pretrained("bert-base-chinese") lm_model = AutoModelForMaskedLM.from_pretrained("bert-base-chinese").to(device) model = Wav2Vec2ForCTC.from_pretrained(model_name).to(device) processor = Wav2Vec2Processor.from_pretrained(processor_name) ds = load_dataset("common_voice", 'zh-TW', data_dir="./cv-corpus-6.1-2020-12-11", split="test") resampler = torchaudio.transforms.Resample(orig_freq=48_000, new_freq=16_000) def map_to_array(batch): speech, _ = torchaudio.load(batch["path"]) batch["speech"] = resampler.forward(speech.squeeze(0)).numpy() batch["sampling_rate"] = resampler.new_freq batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower().replace("’", "'") return batch ds = ds.map(map_to_array) def map_to_pred(batch): features = processor(batch["speech"], sampling_rate=batch["sampling_rate"][0], padding=True, return_tensors="pt") input_values = features.input_values.to(device) attention_mask = features.attention_mask.to(device) with torch.no_grad(): logits = model(input_values, attention_mask=attention_mask).logits decoded_results = [] for logit in logits: pred_ids = torch.argmax(logit, dim=-1) mask = ~pred_ids.eq(tokenizer.pad_token_id).unsqueeze(-1).expand(logit.size()) vocab_size = logit.size()[-1] voice_prob = torch.nn.functional.softmax((torch.masked_select(logit, mask).view(-1,vocab_size)),dim=-1) lm_input = torch.masked_select(pred_ids, ~pred_ids.eq(tokenizer.pad_token_id)).unsqueeze(0) mask_lm_prob = voice_prob.clone() for i in range(lm_input.shape[-1]): masked_lm_input = lm_input.clone() masked_lm_input[0][i] = torch.tensor(tokenizer.mask_token_id).to('cuda') lm_prob = torch.nn.functional.softmax(lm_model(masked_lm_input).logits, dim=-1).squeeze(0) mask_lm_prob[i] = lm_prob[i] comb_pred_ids = torch.argmax(mask_lm_prob*voice_prob, dim=-1) decoded_results.append(processor.decode(comb_pred_ids)) batch["predicted"] = decoded_results batch["target"] = batch["sentence"] return batch result = ds.map(map_to_pred, batched=True, batch_size=1, remove_columns=list(ds.features.keys())) def cer_cal(groundtruth, hypothesis): err = 0 tot = 0 for p, t in zip(hypothesis, groundtruth): err += float(ed.eval(p.lower(), t.lower())) tot += len(t) return err / tot print("CER: {:2f}".format(100 * cer_cal(result["target"],result["predicted"]))) ``` `CER 25.57`. 
`TIME: 09:49 min` ## Evaluation with T-TA: setup ``` !git clone https://github.com/voidful/pytorch-tta.git !mv ./pytorch-tta/tta ./tta !wget https://github.com/voidful/pytorch-tta/releases/download/wiki_zh/wiki_zh.pt ``` ```python import torchaudio from datasets import load_dataset, load_metric from transformers import ( Wav2Vec2ForCTC, Wav2Vec2Processor, ) import torch import re import sys from tta.modeling_tta import TTALMModel from transformers import AutoTokenizer import torch model_name = "voidful/wav2vec2-large-xlsr-53-tw-gpt" device = "cuda" processor_name = "voidful/wav2vec2-large-xlsr-53-tw-gpt" chars_to_ignore_regex = r"[¥•"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、 、〃〈〉《》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏﹑﹔·'℃°•·.﹑︰〈〉─《﹖﹣﹂﹁﹔!?。。"#$%&'()*+,﹐-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏..!\"#$%&()*+,\-.\:;<=>?@\[\]\\\/^_`{|}~]" tokenizer = AutoTokenizer.from_pretrained("bert-base-chinese") lm_model = TTALMModel("bert-base-chinese") tokenizer = AutoTokenizer.from_pretrained("bert-base-chinese") lm_model.load_state_dict(torch.load("./wiki_zh.pt",map_location=torch.device('cuda'))) lm_model.to('cuda') lm_model.eval() model = Wav2Vec2ForCTC.from_pretrained(model_name).to(device) processor = Wav2Vec2Processor.from_pretrained(processor_name) ds = load_dataset("common_voice", 'zh-TW', data_dir="./cv-corpus-6.1-2020-12-11", split="test") resampler = torchaudio.transforms.Resample(orig_freq=48_000, new_freq=16_000) def map_to_array(batch): speech, _ = torchaudio.load(batch["path"]) batch["speech"] = resampler.forward(speech.squeeze(0)).numpy() batch["sampling_rate"] = resampler.new_freq batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower().replace("’", "'") return batch ds = ds.map(map_to_array) def map_to_pred(batch): features = processor(batch["speech"], sampling_rate=batch["sampling_rate"][0], padding=True, return_tensors="pt") input_values = features.input_values.to(device) attention_mask = features.attention_mask.to(device) with torch.no_grad(): logits = model(input_values, attention_mask=attention_mask).logits decoded_results = [] for logit in logits: pred_ids = torch.argmax(logit, dim=-1) mask = ~pred_ids.eq(tokenizer.pad_token_id).unsqueeze(-1).expand(logit.size()) vocab_size = logit.size()[-1] voice_prob = torch.nn.functional.softmax((torch.masked_select(logit, mask).view(-1,vocab_size)),dim=-1) lm_input = torch.masked_select(pred_ids, ~pred_ids.eq(tokenizer.pad_token_id)).unsqueeze(0) lm_prob = torch.nn.functional.softmax(lm_model.forward(lm_input)[0], dim=-1).squeeze(0) comb_pred_ids = torch.argmax(lm_prob*voice_prob, dim=-1) decoded_results.append(processor.decode(comb_pred_ids)) batch["predicted"] = decoded_results batch["target"] = batch["sentence"] return batch result = ds.map(map_to_pred, batched=True, batch_size=16, remove_columns=list(ds.features.keys())) def cer_cal(groundtruth, hypothesis): err = 0 tot = 0 for p, t in zip(hypothesis, groundtruth): err += float(ed.eval(p.lower(), t.lower())) tot += len(t) return err / tot print("CER: {:2f}".format(100 * cer_cal(result["target"],result["predicted"]))) ``` `CER: 25.77`. `TIME: 06:01 min`
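The `cer_cal` helper used throughout the evaluation snippets above depends on the `editdistance` package installed in the env setup; that import is not shown in the snippets themselves. For reference, a self-contained version of the same helper is sketched below, assuming the package is imported as `ed`; the sample strings in the final line are illustrative only.

```python
import editdistance as ed

def cer_cal(groundtruth, hypothesis):
    # Character error rate: total edit distance divided by total reference length.
    err, tot = 0, 0
    for p, t in zip(hypothesis, groundtruth):
        err += float(ed.eval(p.lower(), t.lower()))
        tot += len(t)
    return err / tot

# Illustrative call; in the snippets above the inputs are result["target"] and result["predicted"].
print("CER: {:.2f}".format(100 * cer_cal(["你好嗎"], ["你好嘛"])))
```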
Cloudy/DialoGPT-CJ-large
[ "pytorch", "conversational" ]
conversational
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- language: - multilingual - ar - as - br - ca - cnh - cs - cv - cy - de - dv - el - en - eo - es - et - eu - fa - fi - fr - hi - hsb - hu - ia - id - ja - ka - ky - lg - lt - ly - mn - mt - nl - or - pl - pt - ro - ru - sah - sl - ta - th - tr - tt - uk - vi license: apache-2.0 tags: - audio - automatic-speech-recognition - hf-asr-leaderboard - robust-speech-event - speech - xlsr-fine-tuning-week datasets: - common_voice language_bcp47: - fy-NL - ga-IE - pa-IN - rm-sursilv - rm-vallader - sy-SE - zh-CN - zh-HK - zh-TW model-index: - name: XLSR Wav2Vec2 for 56 language by Voidful results: - task: type: automatic-speech-recognition name: Speech Recognition dataset: name: Common Voice type: common_voice metrics: - type: cer value: 23.21 name: Test CER --- # Model Card for wav2vec2-xlsr-multilingual-56 # Model Details ## Model Description - **Developed by:** voidful - **Shared by [Optional]:** Hugging Face - **Model type:** automatic-speech-recognition - **Language(s) (NLP):** multilingual (*56 language, 1 model Multilingual ASR*) - **License:** Apache-2.0 - **Related Models:** - **Parent Model:** wav2vec - **Resources for more information:** - [GitHub Repo](https://github.com/voidful/wav2vec2-xlsr-multilingual-56) - [Model Space](https://huggingface.co/spaces/Kamtera/Persian_Automatic_Speech_Recognition_and-more) # Uses ## Direct Use This model can be used for the task of automatic-speech-recognition ## Downstream Use [Optional] More information needed ## Out-of-Scope Use The model should not be used to intentionally create hostile or alienating environments for people. # Bias, Risks, and Limitations Significant research has explored bias and fairness issues with language models (see, e.g., [Sheng et al. (2021)](https://aclanthology.org/2021.acl-long.330.pdf) and [Bender et al. (2021)](https://dl.acm.org/doi/pdf/10.1145/3442188.3445922)). Predictions generated by the model may include disturbing and harmful stereotypes across protected classes; identity characteristics; and sensitive, social, and occupational groups. ## Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. # Training Details ## Training Data See the [common_voice dataset card](https://huggingface.co/datasets/common_voice) Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on 56 language using the [Common Voice](https://huggingface.co/datasets/common_voice). ## Training Procedure ### Preprocessing More information needed ### Speeds, Sizes, Times When using this model, make sure that your speech input is sampled at 16kHz. # Evaluation ## Testing Data, Factors & Metrics ### Testing Data More information needed ### Factors ### Metrics More information needed ## Results <details> <summary> Click to expand </summary> | Common Voice Languages | Num. 
of data | Hour | WER | CER | |------------------------|--------------|--------|--------|-------| | ar | 21744 | 81.5 | 75.29 | 31.23 | | as | 394 | 1.1 | 95.37 | 46.05 | | br | 4777 | 7.4 | 93.79 | 41.16 | | ca | 301308 | 692.8 | 24.80 | 10.39 | | cnh | 1563 | 2.4 | 68.11 | 23.10 | | cs | 9773 | 39.5 | 67.86 | 12.57 | | cv | 1749 | 5.9 | 95.43 | 34.03 | | cy | 11615 | 106.7 | 67.03 | 23.97 | | de | 262113 | 822.8 | 27.03 | 6.50 | | dv | 4757 | 18.6 | 92.16 | 30.15 | | el | 3717 | 11.1 | 94.48 | 58.67 | | en | 580501 | 1763.6 | 34.87 | 14.84 | | eo | 28574 | 162.3 | 37.77 | 6.23 | | es | 176902 | 337.7 | 19.63 | 5.41 | | et | 5473 | 35.9 | 86.87 | 20.79 | | eu | 12677 | 90.2 | 44.80 | 7.32 | | fa | 12806 | 290.6 | 53.81 | 15.09 | | fi | 875 | 2.6 | 93.78 | 27.57 | | fr | 314745 | 664.1 | 33.16 | 13.94 | | fy-NL | 6717 | 27.2 | 72.54 | 26.58 | | ga-IE | 1038 | 3.5 | 92.57 | 51.02 | | hi | 292 | 2.0 | 90.95 | 57.43 | | hsb | 980 | 2.3 | 89.44 | 27.19 | | hu | 4782 | 9.3 | 97.15 | 36.75 | | ia | 5078 | 10.4 | 52.00 | 11.35 | | id | 3965 | 9.9 | 82.50 | 22.82 | | it | 70943 | 178.0 | 39.09 | 8.72 | | ja | 1308 | 8.2 | 99.21 | 62.06 | | ka | 1585 | 4.0 | 90.53 | 18.57 | | ky | 3466 | 12.2 | 76.53 | 19.80 | | lg | 1634 | 17.1 | 98.95 | 43.84 | | lt | 1175 | 3.9 | 92.61 | 26.81 | | lv | 4554 | 6.3 | 90.34 | 30.81 | | mn | 4020 | 11.6 | 82.68 | 30.14 | | mt | 3552 | 7.8 | 84.18 | 22.96 | | nl | 14398 | 71.8 | 57.18 | 19.01 | | or | 517 | 0.9 | 90.93 | 27.34 | | pa-IN | 255 | 0.8 | 87.95 | 42.03 | | pl | 12621 | 112.0 | 56.14 | 12.06 | | pt | 11106 | 61.3 | 53.24 | 16.32 | | rm-sursilv | 2589 | 5.9 | 78.17 | 23.31 | | rm-vallader | 931 | 2.3 | 73.67 | 21.76 | | ro | 4257 | 8.7 | 83.84 | 21.95 | | ru | 23444 | 119.1 | 61.83 | 15.18 | | sah | 1847 | 4.4 | 94.38 | 38.46 | | sl | 2594 | 6.7 | 84.21 | 20.54 | | sv-SE | 4350 | 20.8 | 83.68 | 30.79 | | ta | 3788 | 18.4 | 84.19 | 21.60 | | th | 4839 | 11.7 | 141.87 | 37.16 | | tr | 3478 | 22.3 | 66.77 | 15.55 | | tt | 13338 | 26.7 | 86.80 | 33.57 | | uk | 7271 | 39.4 | 70.23 | 14.34 | | vi | 421 | 1.7 | 96.06 | 66.25 | | zh-CN | 27284 | 58.7 | 89.67 | 23.96 | | zh-HK | 12678 | 92.1 | 81.77 | 18.82 | | zh-TW | 6402 | 56.6 | 85.08 | 29.07 | </details> # Model Examination More information needed # Environmental Impact Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** More information needed - **Hours used:** More information needed - **Cloud Provider:** More information needed - **Compute Region:** More information needed - **Carbon Emitted:** More information needed # Technical Specifications [optional] ## Model Architecture and Objective More information needed ## Compute Infrastructure More information needed ### Hardware More information needed ### Software More information needed # Citation **BibTeX:** ``` More information needed ``` **APA:** ``` More information needed ``` # Glossary [optional] More information needed # More Information [optional] More information needed # Model Card Authors [optional] voidful in collaboration with Ezi Ozoani and the Hugging Face team # Model Card Contact More information needed # How to Get Started with the Model Use the code below to get started with the model. 
<details> <summary> Click to expand </summary> ## Env setup: ``` !pip install torchaudio !pip install datasets transformers !pip install asrp !wget -O lang_ids.pk https://huggingface.co/voidful/wav2vec2-xlsr-multilingual-56/raw/main/lang_ids.pk ``` ## Usage ``` import torchaudio from datasets import load_dataset, load_metric from transformers import ( Wav2Vec2ForCTC, Wav2Vec2Processor, AutoTokenizer, AutoModelWithLMHead ) import torch import re import sys import soundfile as sf model_name = "voidful/wav2vec2-xlsr-multilingual-56" device = "cuda" processor_name = "voidful/wav2vec2-xlsr-multilingual-56" import pickle with open("lang_ids.pk", 'rb') as output: lang_ids = pickle.load(output) model = Wav2Vec2ForCTC.from_pretrained(model_name).to(device) processor = Wav2Vec2Processor.from_pretrained(processor_name) model.eval() def load_file_to_data(file,sampling_rate=16_000): batch = {} speech, _ = torchaudio.load(file) if sampling_rate != 16_000: resampler = torchaudio.transforms.Resample(orig_freq=sampling_rate, new_freq=16_000) batch["speech"] = resampler.forward(speech.squeeze(0)).numpy() batch["sampling_rate"] = resampler.new_freq else: batch["speech"] = speech.squeeze(0).numpy() batch["sampling_rate"] = 16_000 return batch def predict(data): features = processor(data["speech"], sampling_rate=data["sampling_rate"], padding=True, return_tensors="pt") input_values = features.input_values.to(device) attention_mask = features.attention_mask.to(device) with torch.no_grad(): logits = model(input_values, attention_mask=attention_mask).logits decoded_results = [] for logit in logits: pred_ids = torch.argmax(logit, dim=-1) mask = pred_ids.ge(1).unsqueeze(-1).expand(logit.size()) vocab_size = logit.size()[-1] voice_prob = torch.nn.functional.softmax((torch.masked_select(logit, mask).view(-1,vocab_size)),dim=-1) comb_pred_ids = torch.argmax(voice_prob, dim=-1) decoded_results.append(processor.decode(comb_pred_ids)) return decoded_results def predict_lang_specific(data,lang_code): features = processor(data["speech"], sampling_rate=data["sampling_rate"], padding=True, return_tensors="pt") input_values = features.input_values.to(device) attention_mask = features.attention_mask.to(device) with torch.no_grad(): logits = model(input_values, attention_mask=attention_mask).logits decoded_results = [] for logit in logits: pred_ids = torch.argmax(logit, dim=-1) mask = ~pred_ids.eq(processor.tokenizer.pad_token_id).unsqueeze(-1).expand(logit.size()) vocab_size = logit.size()[-1] voice_prob = torch.nn.functional.softmax((torch.masked_select(logit, mask).view(-1,vocab_size)),dim=-1) filtered_input = pred_ids[pred_ids!=processor.tokenizer.pad_token_id].view(1,-1).to(device) if len(filtered_input[0]) == 0: decoded_results.append("") else: lang_mask = torch.empty(voice_prob.shape[-1]).fill_(0) lang_index = torch.tensor(sorted(lang_ids[lang_code])) lang_mask.index_fill_(0, lang_index, 1) lang_mask = lang_mask.to(device) comb_pred_ids = torch.argmax(lang_mask*voice_prob, dim=-1) decoded_results.append(processor.decode(comb_pred_ids)) return decoded_results predict(load_file_to_data('audio file path',sampling_rate=16_000)) # beware of the audio file sampling rate predict_lang_specific(load_file_to_data('audio file path',sampling_rate=16_000),'en') # beware of the audio file sampling rate ``` </details>
CodeMonkey98/distilroberta-base-finetuned-wikitext2
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - generated_from_trainer datasets: - null model-index: - name: BiblItBERT-1 results: - task: name: Masked Language Modeling type: fill-mask --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # BiblItBERT-1 This model is a fine-tuned version of [vppvgit/BiblItBERT](https://huggingface.co/vppvgit/BiblItBERT) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.7775 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 0 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:------:|:---------------:| | 1.5764 | 1.0 | 16528 | 1.5214 | | 1.4572 | 2.0 | 33056 | 1.4201 | | 1.3787 | 3.0 | 49584 | 1.3728 | | 1.3451 | 4.0 | 66112 | 1.3245 | | 1.3066 | 5.0 | 82640 | 1.2614 | | 1.2447 | 6.0 | 99168 | 1.2333 | | 1.2172 | 7.0 | 115696 | 1.2149 | | 1.2079 | 8.0 | 132224 | 1.1853 | | 1.2167 | 9.0 | 148752 | 1.1586 | | 1.2056 | 10.0 | 165280 | 1.1503 | | 1.1307 | 11.0 | 181808 | 1.1224 | | 1.1689 | 12.0 | 198336 | 1.1074 | | 1.1007 | 13.0 | 214864 | 1.0924 | | 1.0901 | 14.0 | 231392 | 1.0659 | | 1.0667 | 15.0 | 247920 | 1.0650 | | 1.0434 | 16.0 | 264448 | 1.0362 | | 1.0333 | 17.0 | 280976 | 1.0250 | | 1.0342 | 18.0 | 297504 | 1.0198 | | 1.0059 | 19.0 | 314032 | 0.9950 | | 0.9719 | 20.0 | 330560 | 0.9836 | | 0.9863 | 21.0 | 347088 | 0.9873 | | 0.9781 | 22.0 | 363616 | 0.9724 | | 0.9369 | 23.0 | 380144 | 0.9599 | | 0.9578 | 24.0 | 396672 | 0.9557 | | 0.9253 | 25.0 | 413200 | 0.9400 | | 0.9441 | 26.0 | 429728 | 0.9222 | | 0.9138 | 27.0 | 446256 | 0.9140 | | 0.882 | 28.0 | 462784 | 0.9045 | | 0.864 | 29.0 | 479312 | 0.8880 | | 0.8632 | 30.0 | 495840 | 0.9023 | | 0.8342 | 32.0 | 528896 | 0.8740 | | 0.8037 | 34.0 | 561952 | 0.8647 | | 0.8119 | 37.0 | 611536 | 0.8358 | | 0.8011 | 38.0 | 628064 | 0.8252 | | 0.786 | 39.0 | 644592 | 0.8228 | | 0.7697 | 41.0 | 677648 | 0.8138 | | 0.7485 | 42.0 | 694176 | 0.8104 | | 0.7689 | 43.0 | 710704 | 0.8018 | | 0.7401 | 45.0 | 743760 | 0.7957 | | 0.7031 | 47.0 | 776816 | 0.7726 | | 0.7578 | 48.0 | 793344 | 0.7864 | | 0.7298 | 49.0 | 809872 | 0.7775 | ### Framework versions - Transformers 4.10.3 - Pytorch 1.9.0+cu102 - Datasets 1.12.1 - Tokenizers 0.10.3
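The hyperparameters listed above correspond to a standard 🤗 `Trainer` run; a minimal sketch of the matching `TrainingArguments` is given below. The output directory and the per-epoch evaluation strategy are assumptions for illustration, not values taken from this card, and the surrounding data/model loading is omitted.

```python
from transformers import TrainingArguments

# Mirrors the reported hyperparameters; Adam with betas=(0.9, 0.999) and
# epsilon=1e-08 is the Trainer default optimizer, so it needs no extra flags.
training_args = TrainingArguments(
    output_dir="BiblItBERT-1",      # placeholder
    learning_rate=2e-5,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    seed=0,
    lr_scheduler_type="linear",
    num_train_epochs=50,
    evaluation_strategy="epoch",    # assumption: evaluate once per epoch, as in the table above
)
```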
CodeNinja1126/bert-q-encoder
[ "pytorch" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
2021-09-27T10:46:15Z
--- tags: - generated_from_trainer datasets: - null model-index: - name: BibliBERT results: - task: name: Masked Language Modeling type: fill-mask --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # BibliBERT This model is a fine-tuned version of [dbmdz/bert-base-italian-xxl-cased](https://huggingface.co/dbmdz/bert-base-italian-xxl-cased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.7784 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 0 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:------:|:---------------:| | 1.5764 | 1.0 | 16528 | 1.5214 | | 1.4572 | 2.0 | 33056 | 1.4201 | | 1.3787 | 3.0 | 49584 | 1.3728 | | 1.3451 | 4.0 | 66112 | 1.3245 | | 1.3066 | 5.0 | 82640 | 1.2614 | | 1.2447 | 6.0 | 99168 | 1.2333 | | 1.2172 | 7.0 | 115696 | 1.2149 | | 1.2079 | 8.0 | 132224 | 1.1853 | | 1.2167 | 9.0 | 148752 | 1.1586 | | 1.2056 | 10.0 | 165280 | 1.1503 | | 1.1307 | 11.0 | 181808 | 1.1224 | | 1.1689 | 12.0 | 198336 | 1.1074 | | 1.1007 | 13.0 | 214864 | 1.0924 | | 1.0901 | 14.0 | 231392 | 1.0659 | | 1.0667 | 15.0 | 247920 | 1.0650 | | 1.0434 | 16.0 | 264448 | 1.0362 | | 1.0333 | 17.0 | 280976 | 1.0250 | | 1.0342 | 18.0 | 297504 | 1.0198 | | 1.0059 | 19.0 | 314032 | 0.9950 | | 0.9719 | 20.0 | 330560 | 0.9836 | | 0.9863 | 21.0 | 347088 | 0.9873 | | 0.9781 | 22.0 | 363616 | 0.9724 | | 0.9369 | 23.0 | 380144 | 0.9599 | | 0.9578 | 24.0 | 396672 | 0.9557 | | 0.9253 | 25.0 | 413200 | 0.9400 | | 0.9441 | 26.0 | 429728 | 0.9222 | | 0.9138 | 27.0 | 446256 | 0.9140 | | 0.882 | 28.0 | 462784 | 0.9045 | | 0.864 | 29.0 | 479312 | 0.8880 | | 0.8632 | 30.0 | 495840 | 0.9023 | | 0.8342 | 32.0 | 528896 | 0.8740 | | 0.8037 | 34.0 | 561952 | 0.8647 | | 0.8119 | 37.0 | 611536 | 0.8358 | | 0.8011 | 38.0 | 628064 | 0.8252 | | 0.786 | 39.0 | 644592 | 0.8228 | | 0.7697 | 41.0 | 677648 | 0.8138 | | 0.7485 | 42.0 | 694176 | 0.8104 | | 0.7689 | 43.0 | 710704 | 0.8018 | | 0.7401 | 45.0 | 743760 | 0.7957 | | 0.7031 | 47.0 | 776816 | 0.7726 | | 0.7578 | 48.0 | 793344 | 0.7864 | | 0.7298 | 49.0 | 809872 | 0.7775 | | 0.707 | 50.0 | 826400 | 0.7784 | ### Framework versions - Transformers 4.10.3 - Pytorch 1.9.0+cu102 - Datasets 1.12.1 - Tokenizers 0.10.3
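For a quick qualitative check of the fine-tuned masked-LM, a `fill-mask` pipeline can be used. In the sketch below the model id `vppvgit/BibliBERT` is a placeholder and should be replaced with the actual repository id or a local path to the fine-tuned weights; the Italian sample sentence is illustrative only.

```python
from transformers import pipeline

# "vppvgit/BibliBERT" is assumed here as the checkpoint location; point this
# at the real repository or a local directory containing the fine-tuned weights.
fill_mask = pipeline("fill-mask", model="vppvgit/BibliBERT")
for pred in fill_mask("La biblioteca nazionale conserva molti [MASK] antichi."):
    print(pred["token_str"], pred["score"])
```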
CoderEFE/DialoGPT-marxbot
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational", "has_space" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
null
--- tags: - spacy - token-classification language: - fr model-index: - name: fr_ner_ingredients results: - task: name: NER type: token-classification metrics: - name: NER Precision type: precision value: 0.8990228013 - name: NER Recall type: recall value: 0.9019607843 - name: NER F Score type: f_score value: 0.9004893964 --- | Feature | Description | | --- | --- | | **Name** | `fr_ner_ingredients` | | **Version** | `0.0.0` | | **spaCy** | `>=3.2.1,<3.3.0` | | **Default Pipeline** | `tok2vec`, `ner` | | **Components** | `tok2vec`, `ner` | | **Vectors** | 0 keys, 0 unique vectors (0 dimensions) | | **Sources** | n/a | | **License** | n/a | | **Author** | [n/a]() | ### Label Scheme <details> <summary>View label scheme (5 labels for 1 components)</summary> | Component | Labels | | --- | --- | | **`ner`** | `BRAND`, `FOOD PRODUCT`, `INGREDIENT`, `MEASURE`, `QUANTITY` | </details> ### Accuracy | Type | Score | | --- | --- | | `ENTS_F` | 90.05 | | `ENTS_P` | 89.90 | | `ENTS_R` | 90.20 | | `TOK2VEC_LOSS` | 65769.53 | | `NER_LOSS` | 7865.95 |
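A minimal usage sketch, assuming the packaged pipeline has been installed locally (for example from a built wheel) so that `spacy.load` can resolve it by name; the sample sentence is illustrative only.

```python
import spacy

# Load the installed fr_ner_ingredients package and run NER on a recipe-style sentence.
nlp = spacy.load("fr_ner_ingredients")
doc = nlp("200 g de farine de blé et une pincée de sel fin")
for ent in doc.ents:
    # Possible labels: BRAND, FOOD PRODUCT, INGREDIENT, MEASURE, QUANTITY
    print(ent.text, ent.label_)
```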
CoderEFE/DialoGPT-medium-marx
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- tags: - spacy - token-classification language: - fr model-index: - name: fr_pipeline results: - task: name: NER type: token-classification metrics: - name: NER Precision type: precision value: 0.9011406844 - name: NER Recall type: recall value: 0.92578125 - name: NER F Score type: f_score value: 0.9132947977 --- | Feature | Description | | --- | --- | | **Name** | `fr_pipeline` | | **Version** | `0.0.0` | | **spaCy** | `>=3.2.1,<3.3.0` | | **Default Pipeline** | `tok2vec`, `ner` | | **Components** | `tok2vec`, `ner` | | **Vectors** | 0 keys, 0 unique vectors (0 dimensions) | | **Sources** | n/a | | **License** | n/a | | **Author** | [n/a]() | ### Label Scheme <details> <summary>View label scheme (4 labels for 1 components)</summary> | Component | Labels | | --- | --- | | **`ner`** | `FOOD PRODUCT`, `INGREDIENT`, `MEASURE`, `QUANTITY` | </details> ### Accuracy | Type | Score | | --- | --- | | `ENTS_F` | 91.33 | | `ENTS_P` | 90.11 | | `ENTS_R` | 92.58 | | `TOK2VEC_LOSS` | 8670.94 | | `NER_LOSS` | 4165.31 |
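As with the pipeline above, the package is loaded by its installed name; the sketch below shows batch processing with `nlp.pipe`, where the texts and batch size are illustrative only.

```python
import spacy

# Stream several texts through tok2vec + ner at once; nlp.pipe batches internally.
nlp = spacy.load("fr_pipeline")
texts = ["500 ml de lait entier", "deux cuillères à soupe d'huile d'olive"]
for doc in nlp.pipe(texts, batch_size=32):
    print([(ent.text, ent.label_) for ent in doc.ents])
```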
CoffeeAddict93/gpt2-medium-modest-proposal
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
This model is a downstream optimization of [```vuiseng9/bert-base-squadv1-block-pruning-hybrid-filled-lt```](https://huggingface.co/vuiseng9/bert-base-squadv1-block-pruning-hybrid-filled-lt) using [OpenVINO/NNCF](https://github.com/openvinotoolkit/nncf). Applied optimizations include: 1. magnitude sparsification at 50% upon initialization. Parameters are ranked globally via their absolute norm. Only linear layers of self-attention and ffnn are targeted. 2. NNCF Quantization-Aware Training - Symmetric 8-bit for both weight and activation on all learnable layers. 3. Custom distillation with the large model ```bert-large-uncased-whole-word-masking-finetuned-squad``` ``` eval_exact_match = 80.2081 eval_f1 = 87.5921 eval_samples = 10784 ``` # Setup ```bash # OpenVINO/NNCF git clone https://github.com/vuiseng9/nncf && cd nncf git checkout tld-poc git reset --hard 1dec7afe7a4b567c059fcf287ea2c234980fded2 python setup.py develop pip install -r examples/torch/requirements.txt # Huggingface nn_pruning git clone https://github.com/vuiseng9/nn_pruning && cd nn_pruning git checkout reproduce-evaluation git reset --hard 2d4e196d694c465e43e5fbce6c3836d0a60e1446 pip install -e ".[dev]" # Huggingface Transformers git clone https://github.com/vuiseng9/transformers && cd transformers git checkout tld-poc git reset --hard 10a1e29d84484e48fd106f58957d9ffc89dc43c5 pip install -e . head -n 1 examples/pytorch/question-answering/requirements.txt | xargs -i pip install {} # Additional dependencies pip install onnx ``` # Train ```bash git clone https://huggingface.co/vuiseng9/bert-base-squadv1-block-pruning-hybrid-filled-lt BASE_MODEL=/path/to/cloned_repo_above #to-revise wget https://huggingface.co/vuiseng9/bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-50.0sparse-qat-lt/raw/main/nncf_bert_squad_sparsity.json NNCF_CFG=/path/to/downloaded_nncf_cfg_above #to-revise OUTROOT=/path/to/train_output_root #to-revise WORKDIR=transformers/examples/pytorch/question-answering #to-revise RUNID=bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-50.0sparse-qat-lt cd $WORKDIR OUTDIR=$OUTROOT/$RUNID mkdir -p $OUTDIR export CUDA_VISIBLE_DEVICES=0 NEPOCH=5 python run_qa.py \ --model_name_or_path vuiseng9/bert-base-squadv1-block-pruning-hybrid \ --optimize_model_before_eval \ --optimized_checkpoint $BASE_MODEL \ --dataset_name squad \ --do_eval \ --do_train \ --evaluation_strategy steps \ --eval_steps 250 \ --learning_rate 3e-5 \ --lr_scheduler_type cosine_with_restarts \ --warmup_ratio 0.25 \ --cosine_cycles 1 \ --teacher bert-large-uncased-whole-word-masking-finetuned-squad \ --teacher_ratio 0.9 \ --num_train_epochs $NEPOCH \ --per_device_eval_batch_size 128 \ --per_device_train_batch_size 16 \ --max_seq_length 384 \ --doc_stride 128 \ --save_steps 250 \ --nncf_config $NNCF_CFG \ --logging_steps 1 \ --overwrite_output_dir \ --run_name $RUNID \ --output_dir $OUTDIR ``` # Eval This repo must be cloned locally.
```bash git clone https://huggingface.co/vuiseng9/bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-50.0sparse-qat-lt MODELROOT=/path/to/cloned_repo_above #to-revise export CUDA_VISIBLE_DEVICES=0 OUTDIR=eval-bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-50.0sparse-qat-lt WORKDIR=transformers/examples/pytorch/question-answering #to-revise cd $WORKDIR mkdir $OUTDIR nohup python run_qa.py \ --model_name_or_path vuiseng9/bert-base-squadv1-block-pruning-hybrid \ --dataset_name squad \ --optimize_model_before_eval \ --qat_checkpoint $MODELROOT/checkpoint-26250 \ --nncf_config $MODELROOT/nncf_bert_squad_sparsity.json \ --to_onnx $OUTDIR/bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-50.0sparse-qat-lt.onnx \ --do_eval \ --per_device_eval_batch_size 128 \ --max_seq_length 384 \ --doc_stride 128 \ --overwrite_output_dir \ --output_dir $OUTDIR 2>&1 | tee $OUTDIR/run.log & ```
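The eval command above exports the optimized model to ONNX via `--to_onnx`; a minimal sanity-check sketch for loading that file with the OpenVINO runtime is shown below. The calls assume the OpenVINO 2022.x Python API (`openvino.runtime`), and the zero-filled inputs are placeholders — real input names and shapes should be read from the exported graph rather than assumed.

```python
import numpy as np
from openvino.runtime import Core

# Read and compile the ONNX file produced by --to_onnx (path is a placeholder).
core = Core()
model = core.read_model("bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-50.0sparse-qat-lt.onnx")
compiled = core.compile_model(model, "CPU")

# Build zero-filled int64 tensors at the 384-token sequence length used above.
inputs = {port.get_any_name(): np.zeros((1, 384), dtype=np.int64) for port in compiled.inputs}
results = compiled.infer_new_request(inputs)
for port, value in results.items():
    print(port.get_any_name(), value.shape)
```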
CoffeeAddict93/gpt2-modest-proposal
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
This model is a downstream optimization of [```vuiseng9/bert-base-squadv1-block-pruning-hybrid-filled-lt```](https://huggingface.co/vuiseng9/bert-base-squadv1-block-pruning-hybrid-filled-lt) using [OpenVINO/NNCF](https://github.com/openvinotoolkit/nncf). Applied optimizations include: 1. magnitude sparsification at 57.92% upon initialization so that sparsity over all linear layers of bert-base is at 90%. Parameters are ranked globally via their absolute norm. Only linear layers of self-attention and ffnn are targeted. 2. Custom distillation with the large model ```bert-large-uncased-whole-word-masking-finetuned-squad``` ``` eval_exact_match = 80.4447 eval_f1 = 87.7678 eval_samples = 10784 ``` # Setup ```bash # OpenVINO/NNCF git clone https://github.com/vuiseng9/nncf && cd nncf git checkout tld-poc git reset --hard 1dec7afe7a4b567c059fcf287ea2c234980fded2 python setup.py develop pip install -r examples/torch/requirements.txt # Huggingface nn_pruning git clone https://github.com/vuiseng9/nn_pruning && cd nn_pruning git checkout reproduce-evaluation git reset --hard 2d4e196d694c465e43e5fbce6c3836d0a60e1446 pip install -e ".[dev]" # Huggingface Transformers git clone https://github.com/vuiseng9/transformers && cd transformers git checkout tld-poc git reset --hard 10a1e29d84484e48fd106f58957d9ffc89dc43c5 pip install -e . head -n 1 examples/pytorch/question-answering/requirements.txt | xargs -i pip install {} # Additional dependencies pip install onnx ``` # Train ```bash git clone https://huggingface.co/vuiseng9/bert-base-squadv1-block-pruning-hybrid-filled-lt BASE_MODEL=/path/to/cloned_repo_above #to-revise wget https://huggingface.co/vuiseng9/bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-57.92sparse-lt/raw/main/nncf_bert_squad_sparsity.json NNCF_CFG=/path/to/downloaded_nncf_cfg_above #to-revise OUTROOT=/path/to/train_output_root #to-revise WORKDIR=transformers/examples/pytorch/question-answering #to-revise RUNID=bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-57.92sparse-lt cd $WORKDIR OUTDIR=$OUTROOT/$RUNID mkdir -p $OUTDIR export CUDA_VISIBLE_DEVICES=0 NEPOCH=5 python run_qa.py \ --model_name_or_path vuiseng9/bert-base-squadv1-block-pruning-hybrid \ --optimize_model_before_eval \ --optimized_checkpoint $BASE_MODEL \ --dataset_name squad \ --do_eval \ --do_train \ --evaluation_strategy steps \ --eval_steps 250 \ --learning_rate 3e-5 \ --lr_scheduler_type cosine_with_restarts \ --warmup_ratio 0.25 \ --cosine_cycles 1 \ --teacher bert-large-uncased-whole-word-masking-finetuned-squad \ --teacher_ratio 0.9 \ --num_train_epochs $NEPOCH \ --per_device_eval_batch_size 128 \ --per_device_train_batch_size 16 \ --max_seq_length 384 \ --doc_stride 128 \ --save_steps 250 \ --nncf_config $NNCF_CFG \ --logging_steps 1 \ --overwrite_output_dir \ --run_name $RUNID \ --output_dir $OUTDIR ``` # Eval This repo must be cloned locally.
```bash git clone https://huggingface.co/vuiseng9/bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-57.92sparse-lt MODELROOT=/path/to/cloned_repo_above #to-revise export CUDA_VISIBLE_DEVICES=0 OUTDIR=eval-bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-57.92sparse-lt WORKDIR=transformers/examples/pytorch/question-answering #to-revise cd $WORKDIR mkdir $OUTDIR nohup python run_qa.py \ --model_name_or_path vuiseng9/bert-base-squadv1-block-pruning-hybrid \ --dataset_name squad \ --optimize_model_before_eval \ --qat_checkpoint $MODELROOT/checkpoint-20000 \ --nncf_config $MODELROOT/nncf_bert_squad_sparsity.json \ --to_onnx $OUTDIR/bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-57.92sparse-lt.onnx \ --do_eval \ --per_device_eval_batch_size 128 \ --max_seq_length 384 \ --doc_stride 128 \ --overwrite_output_dir \ --output_dir $OUTDIR 2>&1 | tee $OUTDIR/run.log & ```
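To relate the checkpoint back to the sparsity figures quoted above, one can count zero-valued entries in the 2-D weight tensors of the saved state dict; a rough sketch is given below. The checkpoint path is a placeholder, and because the count includes embedding matrices it only approximates the linear-layer-only figure reported in the card.

```python
import torch

# Load the optimized checkpoint (path is a placeholder) and measure the
# fraction of exactly-zero entries across 2-D weight matrices.
state = torch.load("pytorch_model.bin", map_location="cpu")
zeros, total = 0, 0
for name, tensor in state.items():
    if torch.is_tensor(tensor) and name.endswith(".weight") and tensor.dim() == 2:
        zeros += int((tensor == 0).sum())
        total += tensor.numel()
print(f"2-D weight sparsity: {zeros / total:.2%}")
```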
CogComp/bart-faithful-summary-detector
[ "pytorch", "jax", "bart", "text-classification", "en", "dataset:xsum", "transformers", "xsum", "license:cc-by-sa-4.0" ]
text-classification
{ "architectures": [ "BartForSequenceClassification" ], "model_type": "bart", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": 1, "max_length": 128, "min_length": 12, "no_repeat_ngram_size": null, "num_beams": 4, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
234
null
This model is a downstream optimization of [```vuiseng9/bert-base-squadv1-block-pruning-hybrid-filled-lt```](https://huggingface.co/vuiseng9/bert-base-squadv1-block-pruning-hybrid-filled-lt) using [OpenVINO/NNCF](https://github.com/openvinotoolkit/nncf). Applied optimizations include: 1. magnitude sparsification at 57.92% upon initialization so that sparsity over all linear layers of bert-base is at 90%. Parameters are ranked globally via their absolute norm. Only linear layers of self-attention and ffnn are targeted. 2. NNCF Quantization-Aware Training - Symmetric 8-bit for both weight and activation on all learnable layers. 3. Custom distillation with the large model ```bert-large-uncased-whole-word-masking-finetuned-squad``` ``` eval_exact_match = 80.4541 eval_f1 = 87.6832 eval_samples = 10784 ``` # Setup ```bash # OpenVINO/NNCF git clone https://github.com/vuiseng9/nncf && cd nncf git checkout tld-poc git reset --hard 1dec7afe7a4b567c059fcf287ea2c234980fded2 python setup.py develop pip install -r examples/torch/requirements.txt # Huggingface nn_pruning git clone https://github.com/vuiseng9/nn_pruning && cd nn_pruning git checkout reproduce-evaluation git reset --hard 2d4e196d694c465e43e5fbce6c3836d0a60e1446 pip install -e ".[dev]" # Huggingface Transformers git clone https://github.com/vuiseng9/transformers && cd transformers git checkout tld-poc git reset --hard 10a1e29d84484e48fd106f58957d9ffc89dc43c5 pip install -e . head -n 1 examples/pytorch/question-answering/requirements.txt | xargs -i pip install {} # Additional dependencies pip install onnx ``` # Train ```bash git clone https://huggingface.co/vuiseng9/bert-base-squadv1-block-pruning-hybrid-filled-lt BASE_MODEL=/path/to/cloned_repo_above #to-revise wget https://huggingface.co/vuiseng9/bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-57.92sparse-qat-lt/raw/main/nncf_bert_squad_sparsity.json NNCF_CFG=/path/to/downloaded_nncf_cfg_above #to-revise OUTROOT=/path/to/train_output_root #to-revise WORKDIR=transformers/examples/pytorch/question-answering #to-revise RUNID=bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-57.92sparse-qat-lt cd $WORKDIR OUTDIR=$OUTROOT/$RUNID mkdir -p $OUTDIR export CUDA_VISIBLE_DEVICES=0 NEPOCH=5 python run_qa.py \ --model_name_or_path vuiseng9/bert-base-squadv1-block-pruning-hybrid \ --optimize_model_before_eval \ --optimized_checkpoint $BASE_MODEL \ --dataset_name squad \ --do_eval \ --do_train \ --evaluation_strategy steps \ --eval_steps 250 \ --learning_rate 3e-5 \ --lr_scheduler_type cosine_with_restarts \ --warmup_ratio 0.25 \ --cosine_cycles 1 \ --teacher bert-large-uncased-whole-word-masking-finetuned-squad \ --teacher_ratio 0.9 \ --num_train_epochs $NEPOCH \ --per_device_eval_batch_size 128 \ --per_device_train_batch_size 16 \ --max_seq_length 384 \ --doc_stride 128 \ --save_steps 250 \ --nncf_config $NNCF_CFG \ --logging_steps 1 \ --overwrite_output_dir \ --run_name $RUNID \ --output_dir $OUTDIR ``` # Eval This repo must be cloned locally.
```bash git clone https://huggingface.co/vuiseng9/bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-57.92sparse-qat-lt MODELROOT=/path/to/cloned_repo_above #to-revise export CUDA_VISIBLE_DEVICES=0 OUTDIR=eval-bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-57.92sparse-qat-lt WORKDIR=transformers/examples/pytorch/question-answering #to-revise cd $WORKDIR mkdir $OUTDIR nohup python run_qa.py \ --model_name_or_path vuiseng9/bert-base-squadv1-block-pruning-hybrid \ --dataset_name squad \ --optimize_model_before_eval \ --qat_checkpoint $MODELROOT/checkpoint-21750 \ --nncf_config $MODELROOT/nncf_bert_squad_sparsity.json \ --to_onnx $OUTDIR/bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-57.92sparse-qat-lt.onnx \ --do_eval \ --per_device_eval_batch_size 128 \ --max_seq_length 384 \ --doc_stride 128 \ --overwrite_output_dir \ --output_dir $OUTDIR 2>&1 | tee $OUTDIR/run.log & ``` ### tile-alignment to evaluate tile-alignment checkpoint, add ```--tile_alignment``` and point ```--qat_checkpoint``` to checkpoint with 'tilealigned' postfix. Use branch ```tld-poc``` with commit id ```c525c52cq```
CogComp/roberta-temporal-predictor
[ "pytorch", "roberta", "fill-mask", "arxiv:2202.00436", "transformers", "license:mit", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
This model is a downstream optimization of [```vuiseng9/bert-base-squadv1-block-pruning-hybrid-filled-lt```](https://huggingface.co/vuiseng9/bert-base-squadv1-block-pruning-hybrid-filled-lt) using [OpenVINO/NNCF](https://github.com/openvinotoolkit/nncf). Applied optimizations include: 1. magnitude sparsification at 60% upon initialization. Parameters are ranked globally via their absolute norm. Only linear layers of self-attention and ffnn are targeted. 2. NNCF Quantization-Aware Training - Symmetric 8-bit for both weight and activation on all learnable layers. 3. Custom distillation with the large model ```bert-large-uncased-whole-word-masking-finetuned-squad``` ``` eval_exact_match = 80.3122 eval_f1 = 87.6162 eval_samples = 10784 ``` # Setup ```bash # OpenVINO/NNCF git clone https://github.com/vuiseng9/nncf && cd nncf git checkout tld-poc git reset --hard 1dec7afe7a4b567c059fcf287ea2c234980fded2 python setup.py develop pip install -r examples/torch/requirements.txt # Huggingface nn_pruning git clone https://github.com/vuiseng9/nn_pruning && cd nn_pruning git checkout reproduce-evaluation git reset --hard 2d4e196d694c465e43e5fbce6c3836d0a60e1446 pip install -e ".[dev]" # Huggingface Transformers git clone https://github.com/vuiseng9/transformers && cd transformers git checkout tld-poc git reset --hard 10a1e29d84484e48fd106f58957d9ffc89dc43c5 pip install -e . head -n 1 examples/pytorch/question-answering/requirements.txt | xargs -i pip install {} # Additional dependencies pip install onnx ``` # Train ```bash git clone https://huggingface.co/vuiseng9/bert-base-squadv1-block-pruning-hybrid-filled-lt BASE_MODEL=/path/to/cloned_repo_above #to-revise wget https://huggingface.co/vuiseng9/bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-60.0sparse-qat-lt/raw/main/nncf_bert_squad_sparsity.json NNCF_CFG=/path/to/downloaded_nncf_cfg_above #to-revise OUTROOT=/path/to/train_output_root #to-revise WORKDIR=transformers/examples/pytorch/question-answering #to-revise RUNID=bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-60.0sparse-qat-lt cd $WORKDIR OUTDIR=$OUTROOT/$RUNID mkdir -p $OUTDIR export CUDA_VISIBLE_DEVICES=0 NEPOCH=5 python run_qa.py \ --model_name_or_path vuiseng9/bert-base-squadv1-block-pruning-hybrid \ --optimize_model_before_eval \ --optimized_checkpoint $BASE_MODEL \ --dataset_name squad \ --do_eval \ --do_train \ --evaluation_strategy steps \ --eval_steps 250 \ --learning_rate 3e-5 \ --lr_scheduler_type cosine_with_restarts \ --warmup_ratio 0.25 \ --cosine_cycles 1 \ --teacher bert-large-uncased-whole-word-masking-finetuned-squad \ --teacher_ratio 0.9 \ --num_train_epochs $NEPOCH \ --per_device_eval_batch_size 128 \ --per_device_train_batch_size 16 \ --max_seq_length 384 \ --doc_stride 128 \ --save_steps 250 \ --nncf_config $NNCF_CFG \ --logging_steps 1 \ --overwrite_output_dir \ --run_name $RUNID \ --output_dir $OUTDIR ``` # Eval This repo must be cloned locally.
```bash git clone https://huggingface.co/vuiseng9/bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-60.0sparse-qat-lt MODELROOT=/path/to/cloned_repo_above #to-revise export CUDA_VISIBLE_DEVICES=0 OUTDIR=eval-bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-60.0sparse-qat-lt WORKDIR=transformers/examples/pytorch/question-answering #to-revise cd $WORKDIR mkdir $OUTDIR nohup python run_qa.py \ --model_name_or_path vuiseng9/bert-base-squadv1-block-pruning-hybrid \ --dataset_name squad \ --optimize_model_before_eval \ --qat_checkpoint $MODELROOT/checkpoint-22000 \ --nncf_config $MODELROOT/nncf_bert_squad_sparsity.json \ --to_onnx $OUTDIR/bert-base-squadv1-block-pruning-hybrid-filled-lt-nncf-60.0sparse-qat-lt.onnx \ --do_eval \ --per_device_eval_batch_size 128 \ --max_seq_length 384 \ --doc_stride 128 \ --overwrite_output_dir \ --output_dir $OUTDIR 2>&1 | tee $OUTDIR/run.log & ```