modelId (string, 4-81 chars) | tags (list) | pipeline_tag (string, 17 classes) | config (dict) | downloads (int64, 0-59.7M) | first_commit (timestamp[ns, tz=UTC]) | card (string, 51-438k chars) |
---|---|---|---|---|---|---|
AdapterHub/roberta-base-pf-comqa | [
"roberta",
"en",
"dataset:com_qa",
"arxiv:2104.08247",
"adapter-transformers",
"question-answering"
] | question-answering | {
"architectures": null,
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
language:
- pt
license: apache-2.0
tags:
- automatic-speech-recognition
- pt
datasets:
- mozilla-foundation/common_voice_7_0
---
# exp_w2v2t_pt_vp-nl_s833
Fine-tuned [facebook/wav2vec2-large-nl-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-nl-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (pt)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0).
When using this model, make sure that your speech input is sampled at 16kHz.
This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
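For reference, transcription with [HuggingSound](https://github.com/jonatasgrosman/huggingsound) typically looks like the sketch below; the repo id is a placeholder (this card does not spell out the full repo id), and the audio files are assumed to already be sampled at 16 kHz.
```python
# Minimal usage sketch -- the repo id below is a placeholder, since this card does not
# spell out the full model id; input audio must be sampled at 16 kHz.
from huggingsound import SpeechRecognitionModel

model = SpeechRecognitionModel("<namespace>/exp_w2v2t_pt_vp-nl_s833")
audio_paths = ["/path/to/sample1.wav", "/path/to/sample2.wav"]  # 16 kHz audio files

transcriptions = model.transcribe(audio_paths)
print(transcriptions[0]["transcription"])
```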
|
AdapterHub/roberta-base-pf-conll2003 | [
"roberta",
"en",
"dataset:conll2003",
"arxiv:2104.08247",
"adapter-transformers",
"token-classification",
"adapterhub:ner/conll2003"
] | token-classification | {
"architectures": null,
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 13 | null | ---
language:
- pt
license: apache-2.0
tags:
- automatic-speech-recognition
- pt
datasets:
- mozilla-foundation/common_voice_7_0
---
# exp_w2v2t_pt_vp-nl_s6
Fine-tuned [facebook/wav2vec2-large-nl-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-nl-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (pt)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0).
When using this model, make sure that your speech input is sampled at 16kHz.
This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
|
AdapterHub/roberta-base-pf-copa | [
"roberta",
"en",
"arxiv:2104.08247",
"adapter-transformers",
"adapterhub:comsense/copa"
] | null | {
"architectures": null,
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 4 | 2022-07-11T19:23:24Z | ---
language:
- pt
license: apache-2.0
tags:
- automatic-speech-recognition
- pt
datasets:
- mozilla-foundation/common_voice_7_0
---
# exp_w2v2t_pt_vp-nl_s783
Fine-tuned [facebook/wav2vec2-large-nl-voxpopuli](https://huggingface.co/facebook/wav2vec2-large-nl-voxpopuli) for speech recognition using the train split of [Common Voice 7.0 (pt)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0).
When using this model, make sure that your speech input is sampled at 16kHz.
This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
|
AdapterHub/roberta-base-pf-drop | [
"roberta",
"en",
"dataset:drop",
"arxiv:2104.08247",
"adapter-transformers",
"question-answering"
] | question-answering | {
"architectures": null,
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 8 | null | ---
language:
- pt
license: apache-2.0
tags:
- automatic-speech-recognition
- pt
datasets:
- mozilla-foundation/common_voice_7_0
---
# exp_w2v2t_pt_unispeech-sat_s377
Fine-tuned [microsoft/unispeech-sat-large](https://huggingface.co/microsoft/unispeech-sat-large) for speech recognition using the train split of [Common Voice 7.0 (pt)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0).
When using this model, make sure that your speech input is sampled at 16kHz.
This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
|
AdapterHub/roberta-base-pf-duorc_p | [
"roberta",
"en",
"dataset:duorc",
"arxiv:2104.08247",
"adapter-transformers",
"question-answering"
] | question-answering | {
"architectures": null,
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 2 | null | ---
language:
- pt
license: apache-2.0
tags:
- automatic-speech-recognition
- pt
datasets:
- mozilla-foundation/common_voice_7_0
---
# exp_w2v2t_pt_unispeech-sat_s103
Fine-tuned [microsoft/unispeech-sat-large](https://huggingface.co/microsoft/unispeech-sat-large) for speech recognition using the train split of [Common Voice 7.0 (pt)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0).
When using this model, make sure that your speech input is sampled at 16kHz.
This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
|
AdapterHub/roberta-base-pf-duorc_s | [
"roberta",
"en",
"dataset:duorc",
"arxiv:2104.08247",
"adapter-transformers",
"question-answering"
] | question-answering | {
"architectures": null,
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
language:
- pt
license: apache-2.0
tags:
- automatic-speech-recognition
- pt
datasets:
- mozilla-foundation/common_voice_7_0
---
# exp_w2v2t_pt_xls-r_s17
Fine-tuned [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) for speech recognition using the train split of [Common Voice 7.0 (pt)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0).
When using this model, make sure that your speech input is sampled at 16kHz.
This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
|
AdapterHub/roberta-base-pf-emo | [
"roberta",
"en",
"dataset:emo",
"arxiv:2104.08247",
"adapter-transformers",
"text-classification"
] | text-classification | {
"architectures": null,
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 2 | null | ---
language:
- pt
license: apache-2.0
tags:
- automatic-speech-recognition
- pt
datasets:
- mozilla-foundation/common_voice_7_0
---
# exp_w2v2t_pt_xls-r_s689
Fine-tuned [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) for speech recognition using the train split of [Common Voice 7.0 (pt)](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0).
When using this model, make sure that your speech input is sampled at 16kHz.
This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool.
|
Aftabhussain/Tomato_Leaf_Classifier | [
"pytorch",
"tensorboard",
"vit",
"image-classification",
"transformers",
"huggingpics",
"model-index",
"autotrain_compatible"
] | image-classification | {
"architectures": [
"ViTForImageClassification"
],
"model_type": "vit",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 50 | null | ---
tags:
- Pixelcopter-PLE-v0
- reinforce
- reinforcement-learning
- custom-implementation
- deep-rl-class
model-index:
- name: Reinforce-Pixelcopter-PLE-v0
results:
- metrics:
- type: mean_reward
value: 13.30 +/- 9.12
name: mean_reward
task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: Pixelcopter-PLE-v0
type: Pixelcopter-PLE-v0
---
# Playing **Pixelcopter-PLE-v0** with a **Reinforce** agent
This is a trained model of a **Reinforce** agent playing **Pixelcopter-PLE-v0**.
To learn to use this model and train your own, check out Unit 5 of the Deep Reinforcement Learning Course: https://github.com/huggingface/deep-rl-class/tree/main/unit5
|
AidenGO/KDXF_Bert4MaskedLM | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | {
"architectures": [
"BertForMaskedLM"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 5 | null | ---
language: en
tags:
- PROP
- fill-mask
- Pretrain4IR
license: apache-2.0
datasets:
- msmarco
---
# PROP-marco
**PROP**, **P**re-training with **R**epresentative w**O**rds **P**rediction, is a new pre-training method tailored for ad-hoc retrieval. PROP is inspired by the classical statistical language model for IR, specifically the query likelihood model, which assumes that the query is generated as the piece of text representative of the "ideal" document. Based on this idea, we construct the representative words prediction (ROP) task for pre-training. The full paper can be found [here](https://arxiv.org/pdf/2010.10137.pdf).
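Since the checkpoint is released as a `fill-mask` model, it can be queried like any BERT-style masked language model. A minimal sketch, assuming a placeholder repo id (the card does not state the checkpoint id):
```python
# Illustrative only: replace the placeholder repo id with the actual PROP checkpoint.
from transformers import pipeline

unmasker = pipeline("fill-mask", model="<namespace>/PROP-marco")
print(unmasker("The query is the piece of text representative of the ideal [MASK]."))
```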
# Citation
If you find our work useful, please consider citing our paper:
```bibtex
@inproceedings{DBLP:conf/wsdm/MaGZFJC21,
author = {Xinyu Ma and
Jiafeng Guo and
Ruqing Zhang and
Yixing Fan and
Xiang Ji and
Xueqi Cheng},
editor = {Liane Lewin{-}Eytan and
David Carmel and
Elad Yom{-}Tov and
Eugene Agichtein and
Evgeniy Gabrilovich},
title = {{PROP:} Pre-training with Representative Words Prediction for Ad-hoc
Retrieval},
booktitle = {{WSDM} '21, The Fourteenth {ACM} International Conference on Web Search
and Data Mining, Virtual Event, Israel, March 8-12, 2021},
pages = {283--291},
publisher = {{ACM}},
year = {2021},
url = {https://doi.org/10.1145/3437963.3441777},
doi = {10.1145/3437963.3441777},
timestamp = {Wed, 07 Apr 2021 16:17:44 +0200},
biburl = {https://dblp.org/rec/conf/wsdm/MaGZFJC21.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
``` |
Andrija/SRoBERTaFastBPE | [] | null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
language:
- en
thumbnail: https://github.com/karanchahal/distiller/blob/master/distiller.jpg
tags:
- question-answering
license: apache-2.0
datasets:
- squad
metrics:
- squad
---
# DistilBERT with a second step of distillation
## Model description
This model replicates the "DistilBERT (D)" model from Table 2 of the [DistilBERT paper](https://arxiv.org/pdf/1910.01108.pdf). In this approach, a DistilBERT student is fine-tuned on SQuAD v1.1, but with a BERT model (also fine-tuned on SQuAD v1.1) acting as a teacher for a second step of task-specific distillation.
In this version, the following pre-trained models were used:
* Student: `distilbert-base-uncased`
* Teacher: `lewtun/bert-base-uncased-finetuned-squad-v1`
## Training data
This model was trained on the SQuAD v1.1 dataset which can be obtained from the `datasets` library as follows:
```python
from datasets import load_dataset
squad = load_dataset('squad')
```
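For inference, the distilled student can be queried with the standard `question-answering` pipeline. A minimal sketch, assuming a placeholder checkpoint id (the card does not name the final repo):
```python
# Illustrative inference sketch; replace the placeholder id with the distilled checkpoint.
from transformers import pipeline

qa = pipeline("question-answering", model="<namespace>/distilbert-base-uncased-distilled-squad")
result = qa(
    question="Which library provides the SQuAD dataset?",
    context="The SQuAD v1.1 dataset can be obtained from the Hugging Face datasets library.",
)
print(result["answer"], round(result["score"], 3))
```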
## Training procedure
## Eval results
| | Exact Match | F1 |
|------------------|-------------|------|
| DistilBERT paper | 79.1 | 86.9 |
| Ours | 78.4 | 86.5 |
The scores were calculated using the `squad` metric from `datasets`.
### BibTeX entry and citation info
```bibtex
@misc{sanh2020distilbert,
title={DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter},
author={Victor Sanh and Lysandre Debut and Julien Chaumond and Thomas Wolf},
year={2020},
eprint={1910.01108},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
``` |
AndyJ/clinicalBERT | [
"pytorch",
"transformers"
] | null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 4 | null | ---
tags:
- FrozenLake-v1-4x4
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-FrozenLake-v1-4x4-Slippery
results:
- metrics:
- type: mean_reward
value: 0.04 +/- 0.19
name: mean_reward
task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: FrozenLake-v1-4x4
type: FrozenLake-v1-4x4
---
# **Q-Learning** Agent playing **FrozenLake-v1**
This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1** .
## Usage
```python
import gym  # required for gym.make below

# load_from_hub and evaluate_agent are helper functions defined in the Deep RL Course notebook
model = load_from_hub(repo_id="bothrajat/q-FrozenLake-v1-4x4-Slippery", filename="q-learning.pkl")
# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"])
```
|
AnonymousSub/AR_bert-base-uncased | [
"pytorch",
"bert",
"feature-extraction",
"transformers"
] | feature-extraction | {
"architectures": [
"BertModel"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 2 | null | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- udpos28
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: udpos28-sm-all-POS
results:
- task:
name: Token Classification
type: token-classification
dataset:
name: udpos28
type: udpos28
args: en
metrics:
- name: Precision
type: precision
value: 0.9586517032792105
- name: Recall
type: recall
value: 0.9588997472284696
- name: F1
type: f1
value: 0.9587757092110369
- name: Accuracy
type: accuracy
value: 0.964820639556654
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# udpos28-sm-all-POS
This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the udpos28 dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1479
- Precision: 0.9587
- Recall: 0.9589
- F1: 0.9588
- Accuracy: 0.9648
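A minimal inference sketch, assuming a placeholder repo id (the card does not state the checkpoint id):
```python
# Illustrative usage; replace the placeholder id with the actual udpos28 checkpoint.
from transformers import pipeline

pos_tagger = pipeline(
    "token-classification",
    model="<namespace>/udpos28-sm-all-POS",
    aggregation_strategy="simple",
)
for token in pos_tagger("The quick brown fox jumps over the lazy dog."):
    print(token["word"], token["entity_group"], round(token["score"], 3))
```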
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 4
- eval_batch_size: 4
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:-----:|:---------------:|:---------:|:------:|:------:|:--------:|
| 0.1261 | 1.0 | 4978 | 0.1358 | 0.9513 | 0.9510 | 0.9512 | 0.9581 |
| 0.0788 | 2.0 | 9956 | 0.1326 | 0.9578 | 0.9578 | 0.9578 | 0.9642 |
| 0.0424 | 3.0 | 14934 | 0.1479 | 0.9587 | 0.9589 | 0.9588 | 0.9648 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.10.2+cu102
- Datasets 2.2.2
- Tokenizers 0.12.1
|
AnonymousSub/AR_cline | [
"pytorch",
"roberta",
"feature-extraction",
"transformers"
] | feature-extraction | {
"architectures": [
"RobertaModel"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 2 | null |
---
license: cc-by-4.0
metrics:
- bleu4
- meteor
- rouge-l
- bertscore
- moverscore
language: ko
datasets:
- lmqg/qg_koquad
pipeline_tag: text2text-generation
tags:
- question generation
widget:
- text: "1990๋
์ํ ใ <hl> ๋จ๋ถ๊ตฐ <hl> ใ์์ ๋จ์ญ์ผ๋ก ์ํ๋ฐฐ์ฐ ์ฒซ ๋ฐ๋ท์ ์ด์ด ๊ฐ์ ํด KBS ๋๋ผ๋ง ใ์ง๊ตฌ์ธใ์์ ๋จ์ญ์ผ๋ก ์ถ์ฐํ์๊ณ ์ด๋ฌํด MBC ใ์ฌ๋ช
์ ๋๋์ใ๋ฅผ ํตํด ๋จ์ญ์ผ๋ก ์ถ์ฐํ์๋ค."
example_title: "Question Generation Example 1"
- text: "๋ฐฑ์ ์ด ์๊ธฐ๋๋ฌธ์ ์๋ฐฉ์ฑ
์ <hl> ์ด์ถฉ์ <hl> ๋ฅผ ์ฌ์ฉํ๋ฉด์ ์์ ์ฅ์(์ฐป์ฐฌ ๋ฐ์นจ, ๋ฐฐ์๋ก, ๊ณ ์ธ ๋ฌผ์ ์ด๋ฆฐ ์ ์ฅ์, ๋ฒ๋ ค์ง ํ์ด์ด ๋ฑ)์ ์๋ฅผ ์ค์์ผ๋ก์จ ๋งค๊ฐ์ฒด๋ฅผ ํต์ ํ ์ ์๋ค."
example_title: "Question Generation Example 2"
- text: "<hl> ์ํ
์ดํฌ ์ดฌ์ <hl> ์ด๊ธฐ ๋๋ฌธ์ ํ ์ฌ๋์ด ์ค์๋ฅผ ํ๋ฉด ์ฒ์๋ถํฐ ๋ค์ ์ฐ์ด์ผ ํ๋ ์ํฉ์ด ๋ฐ์ํ๋ค."
example_title: "Question Generation Example 3"
model-index:
- name: lmqg/mt5-base-koquad-qg
results:
- task:
name: Text2text Generation
type: text2text-generation
dataset:
name: lmqg/qg_koquad
type: default
args: default
metrics:
- name: BLEU4 (Question Generation)
type: bleu4_question_generation
value: 12.18
- name: ROUGE-L (Question Generation)
type: rouge_l_question_generation
value: 28.57
- name: METEOR (Question Generation)
type: meteor_question_generation
value: 29.62
- name: BERTScore (Question Generation)
type: bertscore_question_generation
value: 84.52
- name: MoverScore (Question Generation)
type: moverscore_question_generation
value: 83.36
- name: QAAlignedF1Score-BERTScore (Question & Answer Generation (with Gold Answer)) [Gold Answer]
type: qa_aligned_f1_score_bertscore_question_answer_generation_with_gold_answer_gold_answer
value: 88.8
- name: QAAlignedRecall-BERTScore (Question & Answer Generation (with Gold Answer)) [Gold Answer]
type: qa_aligned_recall_bertscore_question_answer_generation_with_gold_answer_gold_answer
value: 88.76
- name: QAAlignedPrecision-BERTScore (Question & Answer Generation (with Gold Answer)) [Gold Answer]
type: qa_aligned_precision_bertscore_question_answer_generation_with_gold_answer_gold_answer
value: 88.84
- name: QAAlignedF1Score-MoverScore (Question & Answer Generation (with Gold Answer)) [Gold Answer]
type: qa_aligned_f1_score_moverscore_question_answer_generation_with_gold_answer_gold_answer
value: 85.93
- name: QAAlignedRecall-MoverScore (Question & Answer Generation (with Gold Answer)) [Gold Answer]
type: qa_aligned_recall_moverscore_question_answer_generation_with_gold_answer_gold_answer
value: 85.87
- name: QAAlignedPrecision-MoverScore (Question & Answer Generation (with Gold Answer)) [Gold Answer]
type: qa_aligned_precision_moverscore_question_answer_generation_with_gold_answer_gold_answer
value: 86.01
- name: QAAlignedF1Score-BERTScore (Question & Answer Generation) [Gold Answer]
type: qa_aligned_f1_score_bertscore_question_answer_generation_gold_answer
value: 77.26
- name: QAAlignedRecall-BERTScore (Question & Answer Generation) [Gold Answer]
type: qa_aligned_recall_bertscore_question_answer_generation_gold_answer
value: 78.25
- name: QAAlignedPrecision-BERTScore (Question & Answer Generation) [Gold Answer]
type: qa_aligned_precision_bertscore_question_answer_generation_gold_answer
value: 76.37
- name: QAAlignedF1Score-MoverScore (Question & Answer Generation) [Gold Answer]
type: qa_aligned_f1_score_moverscore_question_answer_generation_gold_answer
value: 77.51
- name: QAAlignedRecall-MoverScore (Question & Answer Generation) [Gold Answer]
type: qa_aligned_recall_moverscore_question_answer_generation_gold_answer
value: 78.95
- name: QAAlignedPrecision-MoverScore (Question & Answer Generation) [Gold Answer]
type: qa_aligned_precision_moverscore_question_answer_generation_gold_answer
value: 76.26
---
# Model Card of `lmqg/mt5-base-koquad-qg`
This model is a fine-tuned version of [google/mt5-base](https://huggingface.co/google/mt5-base) for the question generation task on the [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) (dataset_name: default) dataset via [`lmqg`](https://github.com/asahi417/lm-question-generation).
### Overview
- **Language model:** [google/mt5-base](https://huggingface.co/google/mt5-base)
- **Language:** ko
- **Training data:** [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) (default)
- **Online Demo:** [https://autoqg.net/](https://autoqg.net/)
- **Repository:** [https://github.com/asahi417/lm-question-generation](https://github.com/asahi417/lm-question-generation)
- **Paper:** [https://arxiv.org/abs/2210.03992](https://arxiv.org/abs/2210.03992)
### Usage
- With [`lmqg`](https://github.com/asahi417/lm-question-generation#lmqg-language-model-for-question-generation-)
```python
from lmqg import TransformersQG
# initialize model
model = TransformersQG(language="ko", model="lmqg/mt5-base-koquad-qg")
# model prediction
questions = model.generate_q(list_context="1990๋ ์ํ ใ ๋จ๋ถ๊ตฐ ใ์์ ๋จ์ญ์ผ๋ก ์ํ๋ฐฐ์ฐ ์ฒซ ๋ฐ๋ท์ ์ด์ด ๊ฐ์ ํด KBS ๋๋ผ๋ง ใ์ง๊ตฌ์ธใ์์ ๋จ์ญ์ผ๋ก ์ถ์ฐํ์๊ณ ์ด๋ฌํด MBC ใ์ฌ๋ช ์ ๋๋์ใ๋ฅผ ํตํด ๋จ์ญ์ผ๋ก ์ถ์ฐํ์๋ค.", list_answer="๋จ๋ถ๊ตฐ")
```
- With `transformers`
```python
from transformers import pipeline
pipe = pipeline("text2text-generation", "lmqg/mt5-base-koquad-qg")
output = pipe("1990๋
์ํ ใ <hl> ๋จ๋ถ๊ตฐ <hl> ใ์์ ๋จ์ญ์ผ๋ก ์ํ๋ฐฐ์ฐ ์ฒซ ๋ฐ๋ท์ ์ด์ด ๊ฐ์ ํด KBS ๋๋ผ๋ง ใ์ง๊ตฌ์ธใ์์ ๋จ์ญ์ผ๋ก ์ถ์ฐํ์๊ณ ์ด๋ฌํด MBC ใ์ฌ๋ช
์ ๋๋์ใ๋ฅผ ํตํด ๋จ์ญ์ผ๋ก ์ถ์ฐํ์๋ค.")
```
## Evaluation
- ***Metric (Question Generation)***: [raw metric file](https://huggingface.co/lmqg/mt5-base-koquad-qg/raw/main/eval/metric.first.sentence.paragraph_answer.question.lmqg_qg_koquad.default.json)
| | Score | Type | Dataset |
|:-----------|--------:|:--------|:-----------------------------------------------------------------|
| BERTScore | 84.52 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) |
| Bleu_1 | 28.54 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) |
| Bleu_2 | 21.05 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) |
| Bleu_3 | 15.92 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) |
| Bleu_4 | 12.18 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) |
| METEOR | 29.62 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) |
| MoverScore | 83.36 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) |
| ROUGE_L | 28.57 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) |
- ***Metric (Question & Answer Generation, Reference Answer)***: Each question is generated from *the gold answer*. [raw metric file](https://huggingface.co/lmqg/mt5-base-koquad-qg/raw/main/eval/metric.first.answer.paragraph.questions_answers.lmqg_qg_koquad.default.json)
| | Score | Type | Dataset |
|:--------------------------------|--------:|:--------|:-----------------------------------------------------------------|
| QAAlignedF1Score (BERTScore) | 88.8 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) |
| QAAlignedF1Score (MoverScore) | 85.93 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) |
| QAAlignedPrecision (BERTScore) | 88.84 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) |
| QAAlignedPrecision (MoverScore) | 86.01 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) |
| QAAlignedRecall (BERTScore) | 88.76 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) |
| QAAlignedRecall (MoverScore) | 85.87 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) |
- ***Metric (Question & Answer Generation, Pipeline Approach)***: Each question is generated on the answer generated by [`lmqg/mt5-base-koquad-ae`](https://huggingface.co/lmqg/mt5-base-koquad-ae). [raw metric file](https://huggingface.co/lmqg/mt5-base-koquad-qg/raw/main/eval_pipeline/metric.first.answer.paragraph.questions_answers.lmqg_qg_koquad.default.lmqg_mt5-base-koquad-ae.json)
| | Score | Type | Dataset |
|:--------------------------------|--------:|:--------|:-----------------------------------------------------------------|
| QAAlignedF1Score (BERTScore) | 77.26 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) |
| QAAlignedF1Score (MoverScore) | 77.51 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) |
| QAAlignedPrecision (BERTScore) | 76.37 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) |
| QAAlignedPrecision (MoverScore) | 76.26 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) |
| QAAlignedRecall (BERTScore) | 78.25 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) |
| QAAlignedRecall (MoverScore) | 78.95 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) |
## Training hyperparameters
The following hyperparameters were used during fine-tuning:
- dataset_path: lmqg/qg_koquad
- dataset_name: default
- input_types: ['paragraph_answer']
- output_types: ['question']
- prefix_types: None
- model: google/mt5-base
- max_length: 512
- max_length_output: 32
- epoch: 11
- batch: 4
- lr: 0.0005
- fp16: False
- random_seed: 1
- gradient_accumulation_steps: 16
- label_smoothing: 0.15
The full configuration can be found at [fine-tuning config file](https://huggingface.co/lmqg/mt5-base-koquad-qg/raw/main/trainer_config.json).
## Citation
```
@inproceedings{ushio-etal-2022-generative,
title = "{G}enerative {L}anguage {M}odels for {P}aragraph-{L}evel {Q}uestion {G}eneration",
author = "Ushio, Asahi and
Alva-Manchego, Fernando and
Camacho-Collados, Jose",
booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing",
month = dec,
year = "2022",
address = "Abu Dhabi, U.A.E.",
publisher = "Association for Computational Linguistics",
}
```
|
AnonymousSub/AR_declutr | [
"pytorch",
"roberta",
"feature-extraction",
"transformers"
] | feature-extraction | {
"architectures": [
"RobertaModel"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 5 | null | ---
language:
- en
license: mit
tags:
- text-classification
inference: false
widget:
- text: "Why do we need an NFQA taxonomy?"
---
# Non Factoid Question Category classification in English
## NFQA model
Repository: [https://github.com/Lurunchik/NF-CATS](https://github.com/Lurunchik/NF-CATS)
The model is trained on the NFQA dataset. The base model is [roberta-base-squad2](https://huggingface.co/deepset/roberta-base-squad2), a RoBERTa-based model for the task of Question Answering, fine-tuned using the SQuAD2.0 dataset.
Uses `NOT-A-QUESTION`, `FACTOID`, `DEBATE`, `EVIDENCE-BASED`, `INSTRUCTION`, `REASON`, `EXPERIENCE`, `COMPARISON` labels.
## How to use NFQA cat with HuggingFace
##### Load NFQA cat and its tokenizer:
```python
from transformers import AutoTokenizer
from nfqa_model import RobertaNFQAClassification
nfqa_model = RobertaNFQAClassification.from_pretrained("Lurunchik/nf-cats")
nfqa_tokenizer = AutoTokenizer.from_pretrained("deepset/roberta-base-squad2")
```
##### Make prediction using helper function:
```python
def get_nfqa_category_prediction(text):
output = nfqa_model(**nfqa_tokenizer(text, return_tensors="pt"))
index = output.logits.argmax()
return nfqa_model.config.id2label[int(index)]
get_nfqa_category_prediction('how to assign category?')
# result
#'INSTRUCTION'
```
## Demo
You can test the model via the [Hugging Face space](https://huggingface.co/spaces/Lurunchik/nf-cats).
## Citation
If you use `NFQA-cats` in your work, please cite [this paper](https://dl.acm.org/doi/10.1145/3477495.3531926)
```
@misc{bolotova2022nfcats,
author = {Bolotova, Valeriia and Blinov, Vladislav and Scholer, Falk and Croft, W. Bruce and Sanderson, Mark},
title = {A Non-Factoid Question-Answering Taxonomy},
year = {2022},
isbn = {9781450387323},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
url = {https://doi.org/10.1145/3477495.3531926},
doi = {10.1145/3477495.3531926},
booktitle = {Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval},
pages = {1196–1207},
numpages = {12},
keywords = {question taxonomy, non-factoid question-answering, editorial study, dataset analysis},
location = {Madrid, Spain},
series = {SIGIR '22}
}
```
Enjoy! 🤗 |
AnonymousSub/AR_rule_based_roberta_bert_quadruplet_epochs_1_shard_1 | [
"pytorch",
"roberta",
"feature-extraction",
"transformers"
] | feature-extraction | {
"architectures": [
"RobertaModel"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 2 | 2022-07-13T12:32:46Z | ---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
results:
- metrics:
- type: mean_reward
value: 28.44 +/- 165.66
name: mean_reward
task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: LunarLander-v2
type: LunarLander-v2
---
# **PPO** Agent playing **LunarLander-v2**
This is a trained model of a **PPO** agent playing **LunarLander-v2**
using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).
## Usage (with Stable-baselines3)
TODO: Add your code
```python
from stable_baselines3 import ...
from huggingface_sb3 import load_from_hub
...
```
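One way the TODO above is often filled in is sketched below; the repo id and filename are hypothetical, so check the repository's file list for the actual saved archive.
```python
# Hypothetical repo id and filename -- check the repository's files for the actual names.
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3.common.evaluation import evaluate_policy

checkpoint = load_from_hub(repo_id="<namespace>/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)

env = make_vec_env("LunarLander-v2", n_envs=1)
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```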
|
AnonymousSub/AR_rule_based_twostagetriplet_epochs_1_shard_1 | [
"pytorch",
"bert",
"feature-extraction",
"transformers"
] | feature-extraction | {
"architectures": [
"BertModel"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 5 | null | # Introduction
torchscript models for https://huggingface.co/wgb14/icefall-asr-gigaspeech-pruned-transducer-stateless2
See also
https://github.com/k2-fsa/icefall/pull/364
and
https://github.com/k2-fsa/icefall/pull/361
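Loading a TorchScript export generally looks like the sketch below; the filename is hypothetical, so check the repository's file list for the actual exported module, and see the linked pull requests for how the exported components are meant to be called.
```python
# Hypothetical filename -- inspect the repository for the actual TorchScript export.
import torch

model = torch.jit.load("cpu_jit.pt", map_location="cpu")
model.eval()
```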
|
AnonymousSub/AR_specter | [
"pytorch",
"bert",
"feature-extraction",
"transformers"
] | feature-extraction | {
"architectures": [
"BertModel"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 2 | null | ---
pipeline_tag: token-classification
datasets:
- conll2003
metrics:
- precision
- recall
- f1
- accuracy
tags:
- distilbert
---
**task**: `token-classification`
**Backend:** `sagemaker-training`
**Backend args:** `{'instance_type': 'ml.g4dn.2xlarge', 'supported_instructions': None}`
**Number of evaluation samples:** `All dataset`
Fixed parameters:
* **model_name_or_path**: `elastic/distilbert-base-uncased-finetuned-conll03-english`
* **dataset**:
* **path**: `conll2003`
* **eval_split**: `validation`
* **data_keys**: `{'primary': 'tokens'}`
* **ref_keys**: `['ner_tags']`
* **calibration_split**: `train`
* **quantization_approach**: `static`
* **operators_to_quantize**: `['Add', 'MatMul']`
* **per_channel**: `False`
* **calibration**:
* **method**: `minmax`
* **num_calibration_samples**: `100`
* **framework**: `onnxruntime`
* **framework_args**:
* **opset**: `11`
* **optimization_level**: `1`
* **aware_training**: `False`
Benchmarked parameters:
* **node_exclusion**: `[]`, `['layernorm', 'gelu', 'residual', 'gather', 'softmax']`
# Evaluation
## Non-time metrics
| node_exclusion | | precision (original) | precision (optimized) | | recall (original) | recall (optimized) | | f1 (original) | f1 (optimized) | | accuracy (original) | accuracy (optimized) |
| :------------------------------------------------------: | :-: | :------------------: | :-------------------: | :-: | :---------------: | :----------------: | :-: | :-----------: | :------------: | :-: | :-----------------: | :------------------: |
| `['layernorm', 'gelu', 'residual', 'gather', 'softmax']` | \| | 0.936 | 0.904 | \| | 0.944 | 0.921 | \| | 0.940 | 0.912 | \| | 0.988 | 0.984 |
| `[]` | \| | 0.936 | 0.065 | \| | 0.944 | 0.243 | \| | 0.940 | 0.103 | \| | 0.988 | 0.357 |
## Time metrics
Time benchmarks were run for 15 seconds per config.
Below, time metrics for batch size = 4, input length = 64.
| node_exclusion | | latency_mean (original, ms) | latency_mean (optimized, ms) | | throughput (original, /s) | throughput (optimized, /s) |
| :------------------------------------------------------: | :-: | :-------------------------: | :--------------------------: | :-: | :-----------------------: | :------------------------: |
| `['layernorm', 'gelu', 'residual', 'gather', 'softmax']` | \| | 103.46 | 53.77 | \| | 9.67 | 18.60 |
| `[]` | \| | 90.62 | 65.86 | \| | 11.07 | 15.20 |
|
AnonymousSub/EManuals_BERT_copy | [
"pytorch",
"bert",
"feature-extraction",
"transformers"
] | feature-extraction | {
"architectures": [
"BertModel"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 2 | null | ---
tags:
- FrozenLake-v1-8x8-no_slippery
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-FrozenLake-v1-8x8-noSlippery
results:
- metrics:
- type: mean_reward
value: 1.00 +/- 0.00
name: mean_reward
task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: FrozenLake-v1-8x8-no_slippery
type: FrozenLake-v1-8x8-no_slippery
---
# **Q-Learning** Agent playing **FrozenLake-v1**
This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1** .
## Usage
```python
import gym  # required for gym.make below

# load_from_hub and evaluate_agent are helper functions defined in the Deep RL Course notebook
model = load_from_hub(repo_id="Chris1/q-FrozenLake-v1-8x8-noSlippery", filename="q-learning.pkl")
# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"])
```
|
AnonymousSub/SR_bert-base-uncased | [
"pytorch",
"bert",
"feature-extraction",
"transformers"
] | feature-extraction | {
"architectures": [
"BertModel"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 3 | null | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- imdb
metrics:
- accuracy
- f1
model-index:
- name: finetuning-sentiment-model-3000-samples
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: imdb
type: imdb
args: plain_text
metrics:
- name: Accuracy
type: accuracy
value: 0.86
- name: F1
type: f1
value: 0.8590604026845637
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# finetuning-sentiment-model-3000-samples
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset.
It achieves the following results on the evaluation set:
- Loss: 0.3239
- Accuracy: 0.86
- F1: 0.8591
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2
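These settings map roughly onto `transformers.TrainingArguments` as sketched below (illustrative only; the original training script is not part of this card, and the Adam betas/epsilon listed above are the library defaults).
```python
# Rough mapping of the hyperparameters listed above (illustrative sketch only).
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="finetuning-sentiment-model-3000-samples",
    learning_rate=2e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=2,
)
```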
### Training results
### Framework versions
- Transformers 4.20.1
- Pytorch 1.12.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
|
AnonymousSub/SR_rule_based_hier_quadruplet_epochs_1_shard_1 | [
"pytorch",
"bert",
"feature-extraction",
"transformers"
] | feature-extraction | {
"architectures": [
"BertModel"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 1 | null | ---
pipeline_tag: token-classification
datasets:
- conll2003
metrics:
- precision
- recall
- f1
- accuracy
tags:
- distilbert
---
**task**: `token-classification`
**Backend:** `sagemaker-training`
**Backend args:** `{'instance_type': 'ml.g4dn.2xlarge', 'supported_instructions': None}`
**Number of evaluation samples:** `All dataset`
Fixed parameters:
* **model_name_or_path**: `elastic/distilbert-base-uncased-finetuned-conll03-english`
* **dataset**:
* **path**: `conll2003`
* **eval_split**: `validation`
* **data_keys**: `{'primary': 'tokens'}`
* **ref_keys**: `['ner_tags']`
* **calibration_split**: `train`
* **quantization_approach**: `static`
* **operators_to_quantize**: `['Add', 'MatMul']`
* **per_channel**: `False`
* **calibration**:
* **method**: `minmax`
* **num_calibration_samples**: `100`
* **framework**: `onnxruntime`
* **framework_args**:
* **opset**: `11`
* **optimization_level**: `1`
* **aware_training**: `False`
Benchmarked parameters:
* **node_exclusion**: `[]`, `['layernorm', 'gelu', 'residual', 'gather', 'softmax']`
# Evaluation
## Non-time metrics
| node_exclusion | | precision (original) | precision (optimized) | | recall (original) | recall (optimized) | | f1 (original) | f1 (optimized) | | accuracy (original) | accuracy (optimized) |
| :------------------------------------------------------: | :-: | :------------------: | :-------------------: | :-: | :---------------: | :----------------: | :-: | :-----------: | :------------: | :-: | :-----------------: | :------------------: |
| `['layernorm', 'gelu', 'residual', 'gather', 'softmax']` | \| | 0.936 | 0.904 | \| | 0.944 | 0.921 | \| | 0.940 | 0.912 | \| | 0.988 | 0.984 |
| `[]` | \| | 0.936 | 0.065 | \| | 0.944 | 0.243 | \| | 0.940 | 0.103 | \| | 0.988 | 0.357 |
## Time metrics
Time benchmarks were run for 15 seconds per config.
Below, time metrics for batch size = 4, input length = 64.
| node_exclusion | | latency_mean (original, ms) | latency_mean (optimized, ms) | | throughput (original, /s) | throughput (optimized, /s) |
| :------------------------------------------------------: | :-: | :-------------------------: | :--------------------------: | :-: | :-----------------------: | :------------------------: |
| `['layernorm', 'gelu', 'residual', 'gather', 'softmax']` | \| | 114.51 | 53.59 | \| | 8.73 | 18.67 |
| `[]` | \| | 90.67 | 59.55 | \| | 11.07 | 16.87 |
|
AnonymousSub/SR_rule_based_roberta_twostagequadruplet_hier_epochs_1_shard_1 | [
"pytorch",
"roberta",
"feature-extraction",
"transformers"
] | feature-extraction | {
"architectures": [
"RobertaModel"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 4 | null | ---
language:
- en
- ar
- bg
- de
- el
- fr
- hi
- ru
- es
- sw
- th
- tr
- ur
- vi
- zh
tags:
- generated_from_trainer
datasets:
- xnli
metrics:
- accuracy
model-index:
- name: pixel-base-finetuned-xnli-translate-train-all
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: XNLI
type: xnli
args: xnli
metrics:
- name: Joint validation accuracy
type: accuracy
value: 0.6254886211512718
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# pixel-base-finetuned-xnli-translate-train-all
This model is a fine-tuned version of [Team-PIXEL/pixel-base](https://huggingface.co/Team-PIXEL/pixel-base) on the XNLI dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 256
- eval_batch_size: 8
- seed: 555
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1000
- training_steps: 50000
- mixed_precision_training: Apex, opt level O1
### Framework versions
- Transformers 4.17.0
- Pytorch 1.11.0
- Datasets 2.0.0
- Tokenizers 0.12.1
|
AnonymousSub/SR_rule_based_roberta_twostagequadruplet_hier_epochs_1_shard_10 | [
"pytorch",
"roberta",
"feature-extraction",
"transformers"
] | feature-extraction | {
"architectures": [
"RobertaModel"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 4 | null |
---
tags:
- unity-ml-agents
- ml-agents
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-Pyramids
library_name: ml-agents
---
# **ppo** Agent playing **Pyramids**
This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).
## Usage (with ML-Agents)
The Documentation: https://github.com/huggingface/ml-agents#get-started
We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub:
### Resume the training
```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```
### Watch your Agent play
You can watch your agent **playing directly in your browser**:
1. Go to https://huggingface.co/spaces/unity/ML-Agents-Pyramids
2. Write your model_id: bothrajat/testpyramidsrnd
3. Select your *.nn / *.onnx file
4. Click on Watch the agent play 👀
|
AnonymousSub/bert-base-uncased_squad2.0 | [
"pytorch",
"bert",
"question-answering",
"transformers",
"autotrain_compatible"
] | question-answering | {
"architectures": [
"BertForQuestionAnswering"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 3 | null | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- emotion
metrics:
- accuracy
- f1
model-index:
- name: distilbert-base-uncased-finetuned-emotion
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: emotion
type: emotion
args: default
metrics:
- name: Accuracy
type: accuracy
value: 0.926
- name: F1
type: f1
value: 0.9261470780516246
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-uncased-finetuned-emotion
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2148
- Accuracy: 0.926
- F1: 0.9261
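A minimal usage sketch (the repo id is a placeholder, since the card does not give the final checkpoint id):
```python
# Illustrative usage; swap in the actual fine-tuned checkpoint id.
from transformers import pipeline

classifier = pipeline("text-classification", model="<namespace>/distilbert-base-uncased-finetuned-emotion")
print(classifier("I can't wait to see the results of this experiment!"))
```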
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 64
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|
| 0.8297 | 1.0 | 250 | 0.3235 | 0.9015 | 0.8977 |
| 0.2504 | 2.0 | 500 | 0.2148 | 0.926 | 0.9261 |
### Framework versions
- Transformers 4.20.1
- Pytorch 1.7.1
- Datasets 2.3.2
- Tokenizers 0.12.1
|
AnonymousSub/bert-base-uncased_wikiqa | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | {
"architectures": [
"BertForSequenceClassification"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 30 | null | ---
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
model-index:
- name: Originalbiobert-v1.1-BioRED-CD-128-32-30
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# Originalbiobert-v1.1-BioRED-CD-128-32-30
This model is a fine-tuned version of [dmis-lab/biobert-v1.1](https://huggingface.co/dmis-lab/biobert-v1.1) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0001
- Precision: 0.9994
- Recall: 1.0
- F1: 0.9997
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 32
- eval_batch_size: 8
- seed: 1
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 30.0
### Training results
### Framework versions
- Transformers 4.11.3
- Pytorch 1.12.0+cu102
- Datasets 2.3.2
- Tokenizers 0.10.3
|
AnonymousSub/bert_snips | [
"pytorch",
"bert",
"feature-extraction",
"transformers"
] | feature-extraction | {
"architectures": [
"BertModel"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 5 | 2022-07-13T17:09:43Z | ---
tags:
- generated_from_keras_callback
model-index:
- name: t5-tiny-finetuned-noisy-ms-en
results: []
---
<!-- This model card has been generated automatically according to the information Keras had access to. You should
probably proofread and complete it, then remove this comment. -->
# t5-super-tiny-finetuned-noisy-ms-en
This model was fine-tuned from the `t5-super-tiny-social-media-2021-11-15.tar.gz` checkpoint at https://github.com/huseinzol05/malaya/tree/master/pretrained-model/t5, on https://huggingface.co/datasets/mesolitica/ms-en and https://huggingface.co/datasets/mesolitica/noisy-ms-en-augmentation.
## Evaluation
### evaluation set
It achieves the following results on the evaluation set using SacreBLEU from [t5-super-tiny-noisy-ms-en-huggingface.ipynb](t5-super-tiny-noisy-ms-en-huggingface.ipynb):
```
{'name': 'BLEU',
'score': 59.92897086989418,
'_mean': -1.0,
'_ci': -1.0,
'_verbose': '79.8/64.0/54.1/46.6 (BP = 1.000 ratio = 1.008 hyp_len = 2017101 ref_len = 2001100)',
'bp': 1.0,
'counts': [1609890, 1235532, 997094, 818350],
'totals': [2017101, 1929506, 1842087, 1755069],
'sys_len': 2017101,
'ref_len': 2001100,
'precisions': [79.81206692178527,
64.03359201785328,
54.12849664538103,
46.62779640002758],
'prec_str': '79.8/64.0/54.1/46.6',
'ratio': 1.0079961021438208}
```
**The test set was generated by a semisupervised model, so this model might produce better translations than that semisupervised model.**
### FLORES200
It achieved the following results on the [NLLB 200 test set](https://github.com/facebookresearch/flores/tree/main/flores200) using SacreBLEU from [sacrebleu-mesolitica-t5-super-tiny-finetuned-noisy-ms-en-flores200.ipynb](sacrebleu-mesolitica-t5-super-tiny-finetuned-noisy-ms-en-flores200.ipynb),
```
chrF2++ = 59.12
```
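As a rough sketch, scores in the format above can be reproduced with the `sacrebleu` metric classes (the hypotheses and references below are placeholders):
```python
from sacrebleu.metrics import BLEU, CHRF

hyps = ["model translation 1", "model translation 2"]   # system outputs (placeholders)
refs = [["reference 1", "reference 2"]]                  # one stream of references (placeholders)

print(BLEU().corpus_score(hyps, refs))                   # corpus BLEU, as reported above
print(CHRF(word_order=2).corpus_score(hyps, refs))       # chrF2++ (word_order=2)
```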
### Framework versions
- Transformers 4.19.0
- TensorFlow 2.6.0
- Datasets 2.1.0
- Tokenizers 0.12.1 |
AnonymousSub/bert_triplet_epochs_1_shard_1 | [
"pytorch",
"bert",
"feature-extraction",
"transformers"
] | feature-extraction | {
"architectures": [
"BertModel"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 2 | 2022-07-13T17:26:06Z | ---
license: apache-2.0
tags:
- text-classification
- generated_from_trainer
metrics:
- accuracy
- f1
model-index:
- name: SPECTER-finetuned-DAGPap22
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# SPECTER-finetuned-DAGPap22
This model is a fine-tuned version of [allenai/specter](https://huggingface.co/allenai/specter) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0023
- Accuracy: 0.9993
- F1: 0.9995
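For reference, a minimal inference sketch with the `text-classification` pipeline (the repo id and input text below are placeholders):
```python
from transformers import pipeline

# Placeholder repo id -- replace with the actual Hub id of this checkpoint.
classifier = pipeline("text-classification", model="<user>/SPECTER-finetuned-DAGPap22")
print(classifier("Abstract or full text of a paper to score."))
```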
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 6e-06
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 50
- num_epochs: 20
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |
|:-------------:|:-----:|:-----:|:---------------:|:--------:|:------:|
| 0.3422 | 1.0 | 669 | 0.4135 | 0.8914 | 0.9140 |
| 0.1074 | 2.0 | 1338 | 0.1216 | 0.9746 | 0.9811 |
| 0.0329 | 3.0 | 2007 | 0.0064 | 0.9989 | 0.9992 |
| 0.0097 | 4.0 | 2676 | 0.0132 | 0.9972 | 0.9980 |
| 0.0123 | 5.0 | 3345 | 0.0231 | 0.9961 | 0.9971 |
| 0.0114 | 6.0 | 4014 | 0.0080 | 0.9985 | 0.9989 |
| 0.0029 | 7.0 | 4683 | 0.2207 | 0.9727 | 0.9797 |
| 0.0075 | 8.0 | 5352 | 0.0145 | 0.9974 | 0.9981 |
| 0.0098 | 9.0 | 6021 | 0.0047 | 0.9994 | 0.9996 |
| 0.0025 | 10.0 | 6690 | 0.0000 | 1.0 | 1.0 |
| 0.0044 | 11.0 | 7359 | 0.0035 | 0.9993 | 0.9995 |
| 0.0 | 12.0 | 8028 | 0.0027 | 0.9996 | 0.9997 |
| 0.0027 | 13.0 | 8697 | 0.0036 | 0.9993 | 0.9995 |
| 0.0055 | 14.0 | 9366 | 0.0017 | 0.9998 | 0.9999 |
| 0.0 | 15.0 | 10035 | 0.0000 | 1.0 | 1.0 |
| 0.0 | 16.0 | 10704 | 0.0000 | 1.0 | 1.0 |
| 0.0022 | 17.0 | 11373 | 0.0111 | 0.9981 | 0.9986 |
| 0.0004 | 18.0 | 12042 | 0.0011 | 0.9994 | 0.9996 |
| 0.0 | 19.0 | 12711 | 0.0020 | 0.9994 | 0.9996 |
| 0.0 | 20.0 | 13380 | 0.0023 | 0.9993 | 0.9995 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.11.0
- Datasets 2.1.0
- Tokenizers 0.12.1
|
AnonymousSub/consert-s10-SR | [
"pytorch",
"bert",
"text-classification",
"transformers"
] | text-classification | {
"architectures": [
"BertForSequenceClassification"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 28 | null |
---
tags:
- unity-ml-agents
- ml-agents
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-Pyramids
library_name: ml-agents
---
# **ppo** Agent playing **Pyramids**
This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).
## Usage (with ML-Agents)
The Documentation: https://github.com/huggingface/ml-agents#get-started
We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub:
### Resume the training
```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```
### Watch your Agent play
You can watch your agent **playing directly in your browser**:
1. Go to https://huggingface.co/spaces/unity/ML-Agents-Pyramids
2. Step 1: Write your model_id: AndrewK/testpyramidsrnd
3. Step 2: Select your *.nn /*.onnx file
4. Click on Watch the agent play 👀
|
AnonymousSub/consert-techqa | [
"pytorch",
"bert",
"question-answering",
"transformers",
"autotrain_compatible"
] | question-answering | {
"architectures": [
"BertForQuestionAnswering"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 4 | null | ---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
results:
- metrics:
- type: mean_reward
value: 270.61 +/- 23.77
name: mean_reward
task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: LunarLander-v2
type: LunarLander-v2
---
# **PPO** Agent playing **LunarLander-v2**
This is a trained model of a **PPO** agent playing **LunarLander-v2**
using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).
## Usage (with Stable-baselines3)
A minimal loading sketch (the repo id and checkpoint filename below are placeholders -- substitute this model's values):
```python
from stable_baselines3 import PPO
from huggingface_sb3 import load_from_hub

# Download the checkpoint from the Hugging Face Hub, then load it with SB3.
checkpoint = load_from_hub(repo_id="<user>/<repo-name>", filename="<checkpoint>.zip")
model = PPO.load(checkpoint)
```
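To sanity-check a reported mean reward like the one above, the loaded policy can be evaluated with SB3's helper (a sketch; assumes `gym` with the Box2D extra is installed and `model` was loaded as above):
```python
import gym
from stable_baselines3.common.evaluation import evaluate_policy

eval_env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=10, deterministic=True)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```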
|
AnonymousSub/declutr-model_wikiqa | [
"pytorch",
"roberta",
"text-classification",
"transformers"
] | text-classification | {
"architectures": [
"RobertaForSequenceClassification"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 26 | null | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- becasv2
model-index:
- name: distilbert-base-uncased-prueba2
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-uncased-prueba2
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the becasv2 dataset.
It achieves the following results on the evaluation set:
- Loss: 3.6356
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 10
- eval_batch_size: 10
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 8
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| No log | 1.0 | 9 | 3.9054 |
| No log | 2.0 | 18 | 3.1893 |
| No log | 3.0 | 27 | 2.9748 |
| No log | 4.0 | 36 | 3.1541 |
| No log | 5.0 | 45 | 3.2887 |
| No log | 6.0 | 54 | 3.5055 |
| No log | 7.0 | 63 | 3.5902 |
| No log | 8.0 | 72 | 3.6356 |
### Framework versions
- Transformers 4.20.1
- Pytorch 1.12.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
|
AnonymousSub/roberta-base_squad2.0 | [
"pytorch",
"roberta",
"question-answering",
"transformers",
"autotrain_compatible"
] | question-answering | {
"architectures": [
"RobertaForQuestionAnswering"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 6 | 2022-07-13T22:31:54Z | ---
tags:
- conversational
---
# Will Byers DialoGPT model |
AnonymousSub/rule_based_bert_hier_diff_equal_wts_epochs_1_shard_1 | [
"pytorch",
"bert",
"feature-extraction",
"transformers"
] | feature-extraction | {
"architectures": [
"BertModel"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 8 | null | ---
license: apache-2.0
tags:
- image-classification
- pytorch
- onnx
datasets:
- pyronear/openfire
---
# MobileNet V3 - Small model
Pretrained on a dataset for wildfire binary classification (soon to be shared). The MobileNet V3 architecture was introduced in [this paper](https://arxiv.org/pdf/1905.02244.pdf).
## Model description
The core idea of the authors is to simplify the final stage, while using SiLU activations and making the Squeeze-and-Excite blocks larger.
## Installation
### Prerequisites
Python 3.6 (or higher) and [pip](https://pip.pypa.io/en/stable/)/[conda](https://docs.conda.io/en/latest/miniconda.html) are required to install PyroVision.
### Latest stable release
You can install the last stable release of the package using [pypi](https://pypi.org/project/pyrovision/) as follows:
```shell
pip install pyrovision
```
or using [conda](https://anaconda.org/pyronear/pyrovision):
```shell
conda install -c pyronear pyrovision
```
### Developer mode
Alternatively, if you wish to use the latest features of the project that haven't made their way to a release yet, you can install the package from source *(install [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) first)*:
```shell
git clone https://github.com/pyronear/pyro-vision.git
pip install -e pyro-vision/.
```
## Usage instructions
```python
import torch
from PIL import Image
from torchvision.transforms import Compose, ConvertImageDtype, Normalize, PILToTensor, Resize
from torchvision.transforms.functional import InterpolationMode
from pyrovision.models import model_from_hf_hub
model = model_from_hf_hub("pyronear/mobilenet_v3_small").eval()
img = Image.open(path_to_an_image).convert("RGB")
# Preprocessing
config = model.default_cfg
transform = Compose([
Resize(config['input_shape'][1:], interpolation=InterpolationMode.BILINEAR),
PILToTensor(),
ConvertImageDtype(torch.float32),
Normalize(config['mean'], config['std'])
])
input_tensor = transform(img).unsqueeze(0)
# Inference
with torch.inference_mode():
output = model(input_tensor)
probs = output.squeeze(0).softmax(dim=0)
```
## Citation
Original paper
```bibtex
@article{DBLP:journals/corr/abs-1905-02244,
author = {Andrew Howard and
Mark Sandler and
Grace Chu and
Liang{-}Chieh Chen and
Bo Chen and
Mingxing Tan and
Weijun Wang and
Yukun Zhu and
Ruoming Pang and
Vijay Vasudevan and
Quoc V. Le and
Hartwig Adam},
title = {Searching for MobileNetV3},
journal = {CoRR},
volume = {abs/1905.02244},
year = {2019},
url = {http://arxiv.org/abs/1905.02244},
eprinttype = {arXiv},
eprint = {1905.02244},
timestamp = {Thu, 27 May 2021 16:20:51 +0200},
biburl = {https://dblp.org/rec/journals/corr/abs-1905-02244.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
```
Source of this implementation
```bibtex
@software{chintala_torchvision_2017,
author = {Chintala, Soumith},
month = {4},
title = {{Torchvision}},
url = {https://github.com/pytorch/vision},
year = {2017}
}
``` |
AnonymousSub/rule_based_roberta_hier_quadruplet_epochs_1_shard_1_wikiqa | [
"pytorch",
"roberta",
"text-classification",
"transformers"
] | text-classification | {
"architectures": [
"RobertaForSequenceClassification"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 24 | 2022-07-14T05:17:52Z | ---
license: apache-2.0
tags:
- summarization
- generated_from_trainer
datasets:
- cnn_dailymail
metrics:
- rouge
model-index:
- name: mt5-small-finetuned-amazon-en-es
results:
- task:
name: Sequence-to-sequence Language Modeling
type: text2text-generation
dataset:
name: cnn_dailymail
type: cnn_dailymail
args: 3.0.0
metrics:
- name: Rouge1
type: rouge
value: 22.6804
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# mt5-small-finetuned-amazon-en-es
This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on the cnn_dailymail dataset.
It achieves the following results on the evaluation set:
- Loss: 2.4413
- Rouge1: 22.6804
- Rouge2: 8.3299
- Rougel: 17.9992
- Rougelsum: 20.7342
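A minimal inference sketch with the `summarization` pipeline (the repo id and article text below are placeholders):
```python
from transformers import pipeline

# Placeholder repo id -- replace with the actual Hub id of this checkpoint.
summarizer = pipeline("summarization", model="<user>/mt5-small-finetuned-amazon-en-es")
article = "Long news article text goes here ..."
print(summarizer(article, max_length=64))
```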
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5.6e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 8
### Training results
| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:------:|:-------:|:---------:|
| 7.77 | 1.0 | 240 | 2.7230 | 17.25 | 5.629 | 14.0381 | 15.8959 |
| 3.7586 | 2.0 | 480 | 2.5949 | 19.4577 | 6.9354 | 15.772 | 17.8773 |
| 3.4314 | 3.0 | 720 | 2.5355 | 20.0511 | 7.6417 | 16.0889 | 18.4551 |
| 3.2892 | 4.0 | 960 | 2.4845 | 20.3951 | 7.88 | 16.601 | 19.0048 |
| 3.1954 | 5.0 | 1200 | 2.4612 | 20.1806 | 7.2656 | 16.2658 | 18.6222 |
| 3.1128 | 6.0 | 1440 | 2.4544 | 22.5647 | 8.0899 | 17.8057 | 20.487 |
| 3.103 | 7.0 | 1680 | 2.4498 | 22.7048 | 8.384 | 17.978 | 20.6871 |
| 3.0708 | 8.0 | 1920 | 2.4413 | 22.6804 | 8.3299 | 17.9992 | 20.7342 |
### Framework versions
- Transformers 4.20.1
- Pytorch 1.12.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
|
AnonymousSub/rule_based_roberta_twostage_quadruplet_epochs_1_shard_1_wikiqa | [
"pytorch",
"roberta",
"text-classification",
"transformers"
] | text-classification | {
"architectures": [
"RobertaForSequenceClassification"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 24 | null | ---
language:
- code
thumbnail: "url to a thumbnail used in social sharing"
tags:
- tag1
- tag2
license: "mit"
datasets:
- dataset1
- dataset2
metrics:
- metric1
- metric2
pipeline_tag: "text-classification"
widget:
- text: "Jens Peter Hansen kommer fra Danmark"
---
README |
AnonymousSub/specter-emanuals-model | [
"pytorch",
"bert",
"feature-extraction",
"transformers"
] | feature-extraction | {
"architectures": [
"BertModel"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 6 | null | # Tranception model
This Hugging Face Hub repo contains the model checkpoint for the Tranception model as described in our paper ["Tranception: protein fitness prediction with autoregressive transformers and inference-time retrieval"](https://arxiv.org/abs/2205.13760). The official GitHub repository can be accessed [here](https://github.com/OATML-Markslab/Tranception). This project is a joint collaboration between the [Marks lab](https://www.deboramarkslab.com/) and the [OATML group](https://oatml.cs.ox.ac.uk/).
## Abstract
The ability to accurately model the fitness landscape of protein sequences is critical to a wide range of applications, from quantifying the effects of human variants on disease likelihood, to predicting immune-escape mutations in viruses and designing novel biotherapeutic proteins. Deep generative models of protein sequences trained on multiple sequence alignments have been the most successful approaches so far to address these tasks. The performance of these methods is however contingent on the availability of sufficiently deep and diverse alignments for reliable training. Their potential scope is thus limited by the fact many protein families are hard, if not impossible, to align. Large language models trained on massive quantities of non-aligned protein sequences from diverse families address these problems and show potential to eventually bridge the performance gap. We introduce Tranception, a novel transformer architecture leveraging autoregressive predictions and retrieval of homologous sequences at inference to achieve state-of-the-art fitness prediction performance. Given its markedly higher performance on multiple mutants, robustness to shallow alignments and ability to score indels, our approach offers significant gain of scope over existing approaches. To enable more rigorous model testing across a broader range of protein families, we develop ProteinGym -- an extensive set of multiplexed assays of variant effects, substantially increasing both the number and diversity of assays compared to existing benchmarks.
## License
This project is available under the MIT license.
## Reference
If you use Tranception or other files provided through our GitHub repository, please cite the following paper:
```
Notin, P., Dias, M., Frazer, J., Marchena-Hurtado, J., Gomez, A., Marks, D.S., Gal, Y. (2022). Tranception: Protein Fitness Prediction with Autoregressive Transformers and Inference-time Retrieval. ICML.
```
## Links
Pre-print: https://arxiv.org/abs/2205.13760
GitHub: https://github.com/OATML-Markslab/Tranception |
AnonymousSub/unsup-consert-base_squad2.0 | [
"pytorch",
"bert",
"question-answering",
"transformers",
"autotrain_compatible"
] | question-answering | {
"architectures": [
"BertForQuestionAnswering"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 2 | null | ---
license: bigscience-bloom-rail-1.0
language:
- ak
- ar
- as
- bm
- bn
- ca
- code
- en
- es
- eu
- fon
- fr
- gu
- hi
- id
- ig
- ki
- kn
- lg
- ln
- ml
- mr
- ne
- nso
- ny
- or
- pa
- pt
- rn
- rw
- sn
- st
- sw
- ta
- te
- tn
- ts
- tum
- tw
- ur
- vi
- wo
- xh
- yo
- zh
- zhs
- zht
- zu
pipeline_tag: text-generation
--- |
AnonymousSub/unsup-consert-emanuals | [
"pytorch",
"bert",
"feature-extraction",
"transformers"
] | feature-extraction | {
"architectures": [
"BertModel"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 2 | null | ---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
results:
- metrics:
- type: mean_reward
value: 222.42 +/- 18.29
name: mean_reward
task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: LunarLander-v2
type: LunarLander-v2
---
# **PPO** Agent playing **LunarLander-v2**
This is a trained model of a **PPO** agent playing **LunarLander-v2**
using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).
## Usage (with Stable-baselines3)
A minimal loading sketch (the repo id and checkpoint filename below are placeholders -- substitute this model's values):
```python
from stable_baselines3 import PPO
from huggingface_sb3 import load_from_hub

# Download the checkpoint from the Hugging Face Hub, then load it with SB3.
checkpoint = load_from_hub(repo_id="<user>/<repo-name>", filename="<checkpoint>.zip")
model = PPO.load(checkpoint)
```
|
Anthos23/my-awesome-model | [
"pytorch",
"tf",
"roberta",
"text-classification",
"transformers"
] | text-classification | {
"architectures": [
"RobertaForSequenceClassification"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 30 | null | ---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
results:
- metrics:
- type: mean_reward
value: -26.78 +/- 71.92
name: mean_reward
task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: LunarLander-v2
type: LunarLander-v2
---
# **PPO** Agent playing **LunarLander-v2**
This is a trained model of a **PPO** agent playing **LunarLander-v2**
using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).
## Usage (with Stable-baselines3)
A minimal loading sketch (the repo id and checkpoint filename below are placeholders -- substitute this model's values):
```python
from stable_baselines3 import PPO
from huggingface_sb3 import load_from_hub

# Download the checkpoint from the Hugging Face Hub, then load it with SB3.
checkpoint = load_from_hub(repo_id="<user>/<repo-name>", filename="<checkpoint>.zip")
model = PPO.load(checkpoint)
```
|
ArBert/albert-base-v2-finetuned-ner-gmm-twitter | [
"pytorch",
"tensorboard",
"albert",
"token-classification",
"transformers",
"autotrain_compatible"
] | token-classification | {
"architectures": [
"AlbertForTokenClassification"
],
"model_type": "albert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 8 | null | ---
license: mit
tags:
- generated_from_trainer
model-index:
- name: predict-perception-bertino-cause-concept
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# predict-perception-bertino-cause-concept
This model is a fine-tuned version of [indigo-ai/BERTino](https://huggingface.co/indigo-ai/BERTino) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2035
- R2: -0.3662
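A minimal inference sketch (assumptions: the repo id below is a placeholder, and the model uses a single-output regression head, which the R2 metric above suggests):
```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

repo_id = "<user>/predict-perception-bertino-cause-concept"  # placeholder
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSequenceClassification.from_pretrained(repo_id)

inputs = tokenizer("Una frase in italiano da valutare.", return_tensors="pt")
with torch.no_grad():
    # Assumes num_labels == 1 (regression); check the config before relying on this.
    score = model(**inputs).logits.squeeze().item()
print(score)
```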
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 20
- eval_batch_size: 8
- seed: 1996
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 47
### Training results
| Training Loss | Epoch | Step | Validation Loss | R2 |
|:-------------:|:-----:|:----:|:---------------:|:-------:|
| 0.3498 | 1.0 | 14 | 0.1845 | -0.2382 |
| 0.2442 | 2.0 | 28 | 0.1575 | -0.0573 |
| 0.1553 | 3.0 | 42 | 0.2216 | -0.4872 |
| 0.0726 | 4.0 | 56 | 0.1972 | -0.3234 |
| 0.0564 | 5.0 | 70 | 0.2832 | -0.9009 |
| 0.0525 | 6.0 | 84 | 0.1854 | -0.2444 |
| 0.0385 | 7.0 | 98 | 0.2816 | -0.8900 |
| 0.0257 | 8.0 | 112 | 0.1815 | -0.2183 |
| 0.03 | 9.0 | 126 | 0.3065 | -1.0576 |
| 0.0275 | 10.0 | 140 | 0.1991 | -0.3367 |
| 0.0175 | 11.0 | 154 | 0.2400 | -0.6110 |
| 0.017 | 12.0 | 168 | 0.1915 | -0.2856 |
| 0.0158 | 13.0 | 182 | 0.2008 | -0.3477 |
| 0.0127 | 14.0 | 196 | 0.1932 | -0.2968 |
| 0.009 | 15.0 | 210 | 0.2500 | -0.6783 |
| 0.0078 | 16.0 | 224 | 0.1969 | -0.3215 |
| 0.0075 | 17.0 | 238 | 0.1857 | -0.2463 |
| 0.0079 | 18.0 | 252 | 0.2405 | -0.6145 |
| 0.0089 | 19.0 | 266 | 0.1865 | -0.2517 |
| 0.0082 | 20.0 | 280 | 0.2275 | -0.5267 |
| 0.0078 | 21.0 | 294 | 0.1890 | -0.2687 |
| 0.0072 | 22.0 | 308 | 0.2230 | -0.4965 |
| 0.0064 | 23.0 | 322 | 0.2286 | -0.5346 |
| 0.0052 | 24.0 | 336 | 0.2154 | -0.4457 |
| 0.0049 | 25.0 | 350 | 0.1901 | -0.2757 |
| 0.0062 | 26.0 | 364 | 0.1917 | -0.2870 |
| 0.0043 | 27.0 | 378 | 0.2042 | -0.3704 |
| 0.0038 | 28.0 | 392 | 0.2251 | -0.5110 |
| 0.0049 | 29.0 | 406 | 0.2092 | -0.4040 |
| 0.0044 | 30.0 | 420 | 0.2119 | -0.4221 |
| 0.0041 | 31.0 | 434 | 0.2018 | -0.3542 |
| 0.0039 | 32.0 | 448 | 0.1875 | -0.2586 |
| 0.0038 | 33.0 | 462 | 0.1980 | -0.3291 |
| 0.0038 | 34.0 | 476 | 0.2071 | -0.3903 |
| 0.0043 | 35.0 | 490 | 0.1998 | -0.3412 |
| 0.0043 | 36.0 | 504 | 0.2052 | -0.3771 |
| 0.004 | 37.0 | 518 | 0.2143 | -0.4382 |
| 0.004 | 38.0 | 532 | 0.1977 | -0.3273 |
| 0.0039 | 39.0 | 546 | 0.2002 | -0.3439 |
| 0.0034 | 40.0 | 560 | 0.2035 | -0.3659 |
| 0.0036 | 41.0 | 574 | 0.1994 | -0.3387 |
| 0.0029 | 42.0 | 588 | 0.2036 | -0.3667 |
| 0.0032 | 43.0 | 602 | 0.2055 | -0.3797 |
| 0.0029 | 44.0 | 616 | 0.2025 | -0.3593 |
| 0.0027 | 45.0 | 630 | 0.2047 | -0.3743 |
| 0.0033 | 46.0 | 644 | 0.2067 | -0.3877 |
| 0.0027 | 47.0 | 658 | 0.2035 | -0.3662 |
### Framework versions
- Transformers 4.16.2
- Pytorch 1.10.2+cu113
- Datasets 1.18.3
- Tokenizers 0.11.0
|
ArBert/albert-base-v2-finetuned-ner-kmeans | [
"pytorch",
"tensorboard",
"albert",
"token-classification",
"transformers",
"autotrain_compatible"
] | token-classification | {
"architectures": [
"AlbertForTokenClassification"
],
"model_type": "albert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 8 | null | ---
license: mit
tags:
- generated_from_trainer
model-index:
- name: predict-perception-bertino-focus-assassin
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# predict-perception-bertino-focus-assassin
This model is a fine-tuned version of [indigo-ai/BERTino](https://huggingface.co/indigo-ai/BERTino) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.3409
- R2: 0.3205
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 20
- eval_batch_size: 8
- seed: 1996
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 47
### Training results
| Training Loss | Epoch | Step | Validation Loss | R2 |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.5573 | 1.0 | 14 | 0.4856 | 0.0321 |
| 0.1739 | 2.0 | 28 | 0.4735 | 0.0562 |
| 0.0813 | 3.0 | 42 | 0.3416 | 0.3191 |
| 0.0764 | 4.0 | 56 | 0.3613 | 0.2799 |
| 0.0516 | 5.0 | 70 | 0.3264 | 0.3495 |
| 0.0459 | 6.0 | 84 | 0.4193 | 0.1643 |
| 0.0414 | 7.0 | 98 | 0.3502 | 0.3019 |
| 0.028 | 8.0 | 112 | 0.3361 | 0.3301 |
| 0.0281 | 9.0 | 126 | 0.3610 | 0.2804 |
| 0.027 | 10.0 | 140 | 0.3523 | 0.2978 |
| 0.0216 | 11.0 | 154 | 0.3440 | 0.3143 |
| 0.0181 | 12.0 | 168 | 0.3506 | 0.3012 |
| 0.013 | 13.0 | 182 | 0.3299 | 0.3424 |
| 0.0116 | 14.0 | 196 | 0.3611 | 0.2803 |
| 0.0118 | 15.0 | 210 | 0.3505 | 0.3013 |
| 0.0139 | 16.0 | 224 | 0.3529 | 0.2967 |
| 0.0099 | 17.0 | 238 | 0.3536 | 0.2952 |
| 0.0096 | 18.0 | 252 | 0.3542 | 0.2941 |
| 0.0107 | 19.0 | 266 | 0.3770 | 0.2486 |
| 0.0088 | 20.0 | 280 | 0.3467 | 0.3091 |
| 0.0065 | 21.0 | 294 | 0.3327 | 0.3369 |
| 0.0073 | 22.0 | 308 | 0.3479 | 0.3066 |
| 0.0062 | 23.0 | 322 | 0.3566 | 0.2893 |
| 0.0063 | 24.0 | 336 | 0.3503 | 0.3019 |
| 0.0057 | 25.0 | 350 | 0.3371 | 0.3282 |
| 0.0049 | 26.0 | 364 | 0.3334 | 0.3355 |
| 0.0045 | 27.0 | 378 | 0.3399 | 0.3225 |
| 0.0049 | 28.0 | 392 | 0.3379 | 0.3266 |
| 0.0049 | 29.0 | 406 | 0.3377 | 0.3268 |
| 0.0055 | 30.0 | 420 | 0.3357 | 0.3309 |
| 0.005 | 31.0 | 434 | 0.3394 | 0.3235 |
| 0.0046 | 32.0 | 448 | 0.3432 | 0.3159 |
| 0.0048 | 33.0 | 462 | 0.3427 | 0.3169 |
| 0.0041 | 34.0 | 476 | 0.3450 | 0.3123 |
| 0.0041 | 35.0 | 490 | 0.3436 | 0.3151 |
| 0.0051 | 36.0 | 504 | 0.3394 | 0.3234 |
| 0.0037 | 37.0 | 518 | 0.3370 | 0.3283 |
| 0.004 | 38.0 | 532 | 0.3370 | 0.3284 |
| 0.0033 | 39.0 | 546 | 0.3339 | 0.3344 |
| 0.0034 | 40.0 | 560 | 0.3335 | 0.3352 |
| 0.003 | 41.0 | 574 | 0.3373 | 0.3276 |
| 0.0035 | 42.0 | 588 | 0.3380 | 0.3264 |
| 0.0032 | 43.0 | 602 | 0.3382 | 0.3259 |
| 0.0034 | 44.0 | 616 | 0.3432 | 0.3158 |
| 0.003 | 45.0 | 630 | 0.3421 | 0.3181 |
| 0.0027 | 46.0 | 644 | 0.3410 | 0.3203 |
| 0.0037 | 47.0 | 658 | 0.3409 | 0.3205 |
### Framework versions
- Transformers 4.16.2
- Pytorch 1.10.2+cu113
- Datasets 1.18.3
- Tokenizers 0.11.0
|
ArBert/albert-base-v2-finetuned-ner | [
"pytorch",
"tensorboard",
"albert",
"token-classification",
"dataset:conll2003",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index",
"autotrain_compatible"
] | token-classification | {
"architectures": [
"AlbertForTokenClassification"
],
"model_type": "albert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 19 | null | ---
language: zh
tags:
- pytorch
license: mit
---
# EVA
## Model Description
EVA is the largest open-source Chinese dialogue model with up to 2.8B parameters. The 1.0 version model is pre-trained on [WudaoCorpus-Dialog](https://resource.wudaoai.cn/home), and the 2.0 version is pre-trained on a carefully cleaned version of WudaoCorpus-Dialog which yields better performance than the 1.0 version. [Paper link](https://arxiv.org/abs/2108.01547) of EVA1.0. [Paper link](https://arxiv.org/abs/2203.09313) of EVA2.0.
## Model Configuration
| Model | n_params | n_enc-layers | n_dec-layers | d_model | d_ff | n_heads | d_head | attn-scale |
| ------------- | -------- | ------------ | ------------ | ------- | ----- | ------- | ------ | ---------- |
| EVA1.0 | 2.8B | 24 | 24 | 2,048 | 5,120 | 32 | 64 | No |
| EVA2.0_Base | 300M | 12 | 12 | 768 | 3,072 | 12 | 64 | Yes |
| EVA2.0_Large | 970M | 24 | 24 | 1,024 | 4,096 | 16 | 64 | Yes |
| EVA2.0_xLarge | 2.8B | 24 | 24 | 2,048 | 5,120 | 32 | 64 | Yes |
## How to use
Please refer to our GitHub [repository](https://github.com/thu-coai/EVA/tree/huggingface).
## Performance
We use the KdConv dataset to finetune and evaluate the model. Under the default hyperparameters in the scripts, we get the following results on the test set:
| | loss | BLEU-4 | Dist-4 | f1 |
| ---- | ---- | ---- | ---- | ---- |
| eva1.0 Before finetune | 3.49 | 0.64 | 85.94 | 13.24 |
| eva1.0 After finetune | 2.37 | 3.94 | 72.47 | 19.80 |
| eva2.0 Before finetune | 3.14 | 1.02 | 82.67 | 14.36 |
| eva2.0 After finetune | 2.09 | 4.69 | 73.03 | 22.91 |
We also use beam search to run the interactive inference of EVA2.0:
```[]
Usr >>> 你好
Sys >>> 你好,请问你是?
Usr >>> 我是小明
Sys >>> 好的,小明,我是新来的,请多关照
Usr >>> 你了解人工智能吗?
Sys >>> 不了解,我只知道我是机器人
Usr >>> 那你很聪明
Sys >>> 谢谢夸奖,我会继续努力的
Usr >>> 你是学什么专业的呀
Sys >>> 计算机科学与技术,你呢?
Usr >>> 我也是学计算机的
Sys >>> 哦,那你有什么想知道的,可以问我
Usr >>> 编程语言你学过哪些?
Sys >>> java,c,c+都学过
Usr >>> 你学的真多
Sys >>> 呵呵,还好吧,我比较喜欢写代码
Usr >>> 哈哈,我也喜欢写代码,以后可以多多交流
Sys >>> 嗯,好的
Usr >>> 拜拜喽
Sys >>> 拜~
```
**NOTE**: Since different hardware may implement random functions differently, you may not be able to reproduce this example even with the same random seed as ours, but the overall performance will not differ much.
## Disclaimer
The pre-trained models aim to facilitate research on conversation generation. The model provided in this repository is trained on a large dataset collected from various sources. Although a rigorous cleaning and filtering process has been applied to both the data and the model output, there is no guarantee that all inappropriate content has been completely removed. The content generated by the model does not represent the authors' opinions. The decoding script provided in this repository is only for research purposes. We are not responsible for any content generated using our model.
## Citation
```
@article{coai2021eva,
title={EVA: An Open-Domain Chinese Dialogue System with Large-Scale Generative Pre-Training},
author={Zhou, Hao and Ke, Pei and Zhang, Zheng and Gu, Yuxian and Zheng, Yinhe and Zheng, Chujie and Wang, Yida and Wu, Chen Henry and Sun, Hao and Yang, Xiaocong and Wen, Bosi and Zhu, Xiaoyan and Huang, Minlie and Tang, Jie},
journal={arXiv preprint arXiv:2108.01547},
year={2021}
}
@article{coai2022eva2,
title={{EVA2.0}: Investigating Open-Domain Chinese Dialogue Systems with Large-Scale Pre-Training},
author={Gu, Yuxian and Wen, Jiaxin and Sun, Hao and Song, Yi and Ke, Pei and Zheng, Chujie and Zhang, Zheng and Yao, Jianzhu and Zhu, Xiaoyan and Tang, Jie and Huang, Minlie},
journal={arXiv preprint arXiv:2203.09313},
year={2022}
}
``` |
ArBert/bert-base-uncased-finetuned-ner-agglo | [] | null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
license: mit
tags:
- generated_from_trainer
model-index:
- name: predict-perception-bertino-focus-victim
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# predict-perception-bertino-focus-victim
This model is a fine-tuned version of [indigo-ai/BERTino](https://huggingface.co/indigo-ai/BERTino) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2497
- R2: 0.6131
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 20
- eval_batch_size: 8
- seed: 1996
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 47
### Training results
| Training Loss | Epoch | Step | Validation Loss | R2 |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.5438 | 1.0 | 14 | 0.4405 | 0.3175 |
| 0.2336 | 2.0 | 28 | 0.2070 | 0.6792 |
| 0.0986 | 3.0 | 42 | 0.2868 | 0.5555 |
| 0.0907 | 4.0 | 56 | 0.2916 | 0.5481 |
| 0.0652 | 5.0 | 70 | 0.2187 | 0.6611 |
| 0.0591 | 6.0 | 84 | 0.2320 | 0.6406 |
| 0.0478 | 7.0 | 98 | 0.2501 | 0.6125 |
| 0.0347 | 8.0 | 112 | 0.2425 | 0.6243 |
| 0.021 | 9.0 | 126 | 0.2670 | 0.5863 |
| 0.0214 | 10.0 | 140 | 0.2853 | 0.5580 |
| 0.0172 | 11.0 | 154 | 0.2726 | 0.5776 |
| 0.0177 | 12.0 | 168 | 0.2629 | 0.5927 |
| 0.0152 | 13.0 | 182 | 0.2396 | 0.6287 |
| 0.012 | 14.0 | 196 | 0.2574 | 0.6012 |
| 0.0119 | 15.0 | 210 | 0.2396 | 0.6288 |
| 0.0128 | 16.0 | 224 | 0.2517 | 0.6100 |
| 0.0109 | 17.0 | 238 | 0.2509 | 0.6112 |
| 0.008 | 18.0 | 252 | 0.2522 | 0.6092 |
| 0.0101 | 19.0 | 266 | 0.2503 | 0.6121 |
| 0.0075 | 20.0 | 280 | 0.2527 | 0.6084 |
| 0.0082 | 21.0 | 294 | 0.2544 | 0.6058 |
| 0.0061 | 22.0 | 308 | 0.2510 | 0.6111 |
| 0.006 | 23.0 | 322 | 0.2402 | 0.6279 |
| 0.005 | 24.0 | 336 | 0.2539 | 0.6066 |
| 0.0058 | 25.0 | 350 | 0.2438 | 0.6222 |
| 0.0051 | 26.0 | 364 | 0.2439 | 0.6221 |
| 0.006 | 27.0 | 378 | 0.2442 | 0.6216 |
| 0.0061 | 28.0 | 392 | 0.2416 | 0.6257 |
| 0.0053 | 29.0 | 406 | 0.2519 | 0.6097 |
| 0.0045 | 30.0 | 420 | 0.2526 | 0.6085 |
| 0.0034 | 31.0 | 434 | 0.2578 | 0.6006 |
| 0.0039 | 32.0 | 448 | 0.2557 | 0.6038 |
| 0.0043 | 33.0 | 462 | 0.2538 | 0.6068 |
| 0.0041 | 34.0 | 476 | 0.2535 | 0.6072 |
| 0.0042 | 35.0 | 490 | 0.2560 | 0.6033 |
| 0.0037 | 36.0 | 504 | 0.2576 | 0.6009 |
| 0.0036 | 37.0 | 518 | 0.2634 | 0.5919 |
| 0.0037 | 38.0 | 532 | 0.2582 | 0.5999 |
| 0.0038 | 39.0 | 546 | 0.2552 | 0.6045 |
| 0.0034 | 40.0 | 560 | 0.2563 | 0.6028 |
| 0.0033 | 41.0 | 574 | 0.2510 | 0.6110 |
| 0.0029 | 42.0 | 588 | 0.2515 | 0.6103 |
| 0.0033 | 43.0 | 602 | 0.2525 | 0.6088 |
| 0.0028 | 44.0 | 616 | 0.2522 | 0.6093 |
| 0.0028 | 45.0 | 630 | 0.2526 | 0.6085 |
| 0.0027 | 46.0 | 644 | 0.2494 | 0.6136 |
| 0.0024 | 47.0 | 658 | 0.2497 | 0.6131 |
### Framework versions
- Transformers 4.16.2
- Pytorch 1.10.2+cu113
- Datasets 1.18.3
- Tokenizers 0.11.0
|
ArBert/bert-base-uncased-finetuned-ner | [
"pytorch",
"tensorboard",
"bert",
"token-classification",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"autotrain_compatible"
] | token-classification | {
"architectures": [
"BertForTokenClassification"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 8 | null | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- imagefolder
metrics:
- accuracy
model-index:
- name: swin-tiny-patch4-window7-224-finetuned-eurosat
results:
- task:
name: Image Classification
type: image-classification
dataset:
name: imagefolder
type: imagefolder
args: default
metrics:
- name: Accuracy
type: accuracy
value: 0.9725925925925926
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# swin-tiny-patch4-window7-224-finetuned-eurosat
This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the imagefolder dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0814
- Accuracy: 0.9726
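A minimal inference sketch with the `image-classification` pipeline (the repo id and image path below are placeholders):
```python
from transformers import pipeline

# Placeholder repo id -- replace with the actual Hub id of this checkpoint.
classifier = pipeline("image-classification", model="<user>/swin-tiny-patch4-window7-224-finetuned-eurosat")
print(classifier("path/to/a_satellite_image.png"))
```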
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 96
- eval_batch_size: 96
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 384
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.3216 | 0.99 | 63 | 0.1349 | 0.9589 |
| 0.2 | 1.99 | 126 | 0.0873 | 0.9704 |
| 0.1664 | 2.99 | 189 | 0.0814 | 0.9726 |
### Framework versions
- Transformers 4.17.0
- Pytorch 1.10.2+cu102
- Datasets 2.3.2
- Tokenizers 0.11.6
|
ArBert/roberta-base-finetuned-ner-agglo-twitter | [
"pytorch",
"tensorboard",
"roberta",
"token-classification",
"transformers",
"generated_from_trainer",
"license:mit",
"autotrain_compatible"
] | token-classification | {
"architectures": [
"RobertaForTokenClassification"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 12 | null | ---
license: mit
tags:
- generated_from_trainer
datasets:
- xtreme
metrics:
- f1
model-index:
- name: xlm-roberta-base-finetuned-panx-de-fr
results:
- task:
name: Token Classification
type: token-classification
dataset:
name: xtreme
type: xtreme
args: PAN-X.en
metrics:
- name: F1
type: f1
value: 0.6886160714285715
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# xlm-roberta-base-finetuned-panx-de-fr
This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4043
- F1: 0.6886
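A minimal inference sketch with the `token-classification` pipeline (the repo id and example sentence below are placeholders):
```python
from transformers import pipeline

# Placeholder repo id -- replace with the actual Hub id of this checkpoint.
ner = pipeline(
    "token-classification",
    model="<user>/xlm-roberta-base-finetuned-panx-de-fr",
    aggregation_strategy="simple",
)
print(ner("Jeff Dean arbeitet bei Google in Kalifornien."))
```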
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 1.1347 | 1.0 | 50 | 0.5771 | 0.4880 |
| 0.5066 | 2.0 | 100 | 0.4209 | 0.6582 |
| 0.3631 | 3.0 | 150 | 0.4043 | 0.6886 |
### Framework versions
- Transformers 4.11.3
- Pytorch 1.12.0+cu113
- Datasets 1.16.1
- Tokenizers 0.10.3
|
Aran/DialoGPT-medium-harrypotter | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": 1000
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 8 | 2022-07-14T16:04:46Z |
---
tags:
- unity-ml-agents
- ml-agents
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-Pyramids
library_name: ml-agents
---
# **ppo** Agent playing **Pyramids**
This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).
## Usage (with ML-Agents)
The Documentation: https://github.com/huggingface/ml-agents#get-started
We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub:
### Resume the training
```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```
### Watch your Agent play
You can watch your agent **playing directly in your browser**:
1. Go to https://huggingface.co/spaces/unity/ML-Agents-Pyramids
2. Step 1: Write your model_id: dbarbedillo/testpyramidsrnd
3. Step 2: Select your *.nn /*.onnx file
4. Click on Watch the agent play 👀
|
Aran/DialoGPT-small-harrypotter | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": 1000
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 8 | null | ---
tags:
- generated_from_trainer
datasets:
- SkelterLabsInc/JaQuAD
model-index:
- name: pixel-base-finetuned-jaquad
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# pixel-base-finetuned-jaquad
This model is a fine-tuned version of [Team-PIXEL/pixel-base](https://huggingface.co/Team-PIXEL/pixel-base) on the SkelterLabsInc/JaQuAD dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 7e-05
- train_batch_size: 32
- eval_batch_size: 8
- seed: 45
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 100
- training_steps: 20000
- mixed_precision_training: Apex, opt level O1
### Training results
### Framework versions
- Transformers 4.17.0
- Pytorch 1.11.0
- Datasets 2.0.0
- Tokenizers 0.12.1
|
ArashEsk95/bert-base-uncased-finetuned-sst2 | [] | null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
license: mit
tags:
- generated_from_trainer
metrics:
- f1
model-index:
- name: xlm-roberta-base-finetuned-panx-all
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# xlm-roberta-base-finetuned-panx-all
This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1348
- F1: 0.8844
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.3055 | 1.0 | 835 | 0.1755 | 0.8272 |
| 0.1561 | 2.0 | 1670 | 0.1441 | 0.8727 |
| 0.1016 | 3.0 | 2505 | 0.1348 | 0.8844 |
### Framework versions
- Transformers 4.11.3
- Pytorch 1.12.0+cu113
- Datasets 1.16.1
- Tokenizers 0.10.3
|
Arghyad/Loki_small | [] | null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
results:
- metrics:
- type: mean_reward
value: 246.41 +/- 23.87
name: mean_reward
task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: LunarLander-v2
type: LunarLander-v2
---
# **PPO** Agent playing **LunarLander-v2**
This is a trained model of a **PPO** agent playing **LunarLander-v2**
using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).
## Usage (with Stable-baselines3)
A minimal loading sketch (the repository id and filename below are placeholders for this model's files on the Hub):
```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# Placeholder repo_id and filename; replace them with this model's actual Hub location.
checkpoint = load_from_hub(repo_id="<user>/<repo>", filename="<model>.zip")
model = PPO.load(checkpoint)
```
|
ArnaudPannatier/MLPMixer | [] | null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
tags:
- CartPole-v1
- reinforce
- reinforcement-learning
- custom-implementation
- deep-rl-class
model-index:
- name: Reinforce-CartPole-v1
results:
- metrics:
- type: mean_reward
value: 500.00 +/- 0.00
name: mean_reward
task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: CartPole-v1
type: CartPole-v1
---
# **Reinforce** Agent playing **CartPole-v1**
This is a trained model of a **Reinforce** agent playing **CartPole-v1** .
To learn how to use this model and train your own, check out Unit 5 of the Deep Reinforcement Learning Class: https://github.com/huggingface/deep-rl-class/tree/main/unit5
|
Arnold/wav2vec2-hausa-demo-colab | [] | null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | 2022-07-14T19:40:59Z | Hosts the pre-tained extracted model from glove.twitter.27B.100d.txt from https://huggingface.co/stanfordnlp/glove/tree/main
Used in: https://github.com/Juliano-rb/experiments_fault_injection_mlaas |
ArpanZS/search_model | [
"joblib"
] | null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
tags:
- image-classification
- timm
library_tag: timm
---
# Model card for resnet18-random |
Arpita/opus-mt-en-ro-finetuned-syn-to-react | [
"pytorch",
"tensorboard",
"marian",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | {
"architectures": [
"MarianMTModel"
],
"model_type": "marian",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 9 | null | ---
tags:
- generated_from_trainer
model-index:
- name: bert-base-cased-wikitext2
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# bert-base-cased-wikitext2
This model is a fine-tuned version of [](https://huggingface.co/) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 6.9846
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 7.7422 | 1.0 | 782 | 7.1373 |
| 7.0302 | 2.0 | 1564 | 6.9972 |
| 6.9788 | 3.0 | 2346 | 7.0087 |
### Framework versions
- Transformers 4.11.3
- Pytorch 1.12.0+cu102
- Datasets 1.14.0
- Tokenizers 0.10.3
|
ArshdeepSekhon050/DialoGPT-medium-RickAndMorty | [] | null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | 2022-07-14T20:54:17Z | ---
license: mit
---
# Predicting Effects and Aromas
<div align="center" style="text-align:center; margin-top:1rem; margin-bottom: 1rem;">
<img width="240px" alt="" src="https://firebasestorage.googleapis.com/v0/b/cannlytics.appspot.com/o/public%2Fimages%2Flogos%2Fskunkfx_logo.png?alt=media&token=1a75b3cc-3230-446c-be7d-5c06012c8e30">
</div>
> "It's been hard to breathe and the smell's been just horrendous... [It's] like you've literally been sprayed by a
**skunk**." - Resident of Prague, Oklahoma in
[*'It's nasty': Prague neighbors push back on area cannabis facility*](https://kfor.com/news/local/its-nasty-prague-neighbors-push-back-on-area-cannabis-facility/), Oklahoma News 4 (2022).
## Objective
Can we build a model to **predict** if someone may *report* specific **effects** or **aromas** given a cannabis product's **lab results**?
## Literature
[Over eight hundred cannabis strains characterized by the relationship between their psychoactive effects,
perceptual profiles, and chemical compositions](https://www.biorxiv.org/content/10.1101/759696v1.abstract) by Laura Alethia de la Fuente, Federico Zamberlan, Andres Sanchez, Facundo Carrillo, Enzo Tagliazucchi, Carla Pallavicini (2019).
* **Claim**: *"While cannabinoid content was variable even within individual strains, terpene profiles matched the perceptual characterizations made by the users and could be used to predict associations between different psychoactive effects."*
## Data
A panel of strain reviews was curated from the data published by [Alethia, et. al. (2019)](https://data.mendeley.com/datasets/6zwcgrttkp/1). First, we downloaded the authors' strain review and lab result datasets. We then curated terpene and cannabinoid data from the raw text files in the lab result dataset. Average cannabinoid and terpene concentrations were calculated for each of the 184 strains in the dataset from 431 lab results. Reviews are for purported strains and the lab results may or may not be representative of the concentration of the product that the reviewer is referencing. However, without the actual lab results of the product that the reviewer is referencing, the average concentrations for similarly named products can serve as an estimate. The following processing and assumptions were applied.
- Field names were transformed to `snake_case`.
- The fields `total_terpenes` and `total_cannabinoids` were calculated as the simple sum of all terpenes and cannabinoids respectively.
- The fields `total_thc`, `total_cbd`, and `total_cbg` were calculated using the decarboxylation rate (87.7%) for THCA, CBDA, and CBGA.
- Observations with `total_cannabinoids` greater than 35% or `total_terpenes` greater than 6% were presumed to be outliers and were excluded.
- The field `classification` was determined by the original authors from natural language processing (NLP) and can take a value of `sativa`, `indica`, or `hybrid` depending on the language in the reviewer's description.
- Fields for each reported aroma and effect were created and assigned a value of 1 if the reviewer reported the aroma or effect and 0 otherwise.
- Terpenes with similar names were combined, filling in missing values: `p_cymene` with `pcymene`, `beta_caryophyllene` with `caryophyllene`, and `humulene` with `alpha_humulene`.
- Certain terpenes were summed into an encompassing field: `ocimene`, `beta_ocimene`, and `trans_ocimene` into `ocimene`; `trans_nerolidol`, `cis_nerolidol`, `transnerolidol_1`, and `transnerolidol_2` into `nerolidol`.
- A new field, `terpinenes`, was created as the sum of `alpha_terpinene`, `gamma_terpinene`, `terpinolene`, and `terpinene`.
| Datasets | URL |
|----------|-----|
| Raw data | <https://data.mendeley.com/datasets/6zwcgrttkp/1> |
| Curated panel data | <https://cannlytics.page.link/reported-effects> |
| Potential strain effects data | <https://cannlytics.page.link/strain-effects> |
<!-- TODO: Add WA and CT (OH?) datasets :) -->
## Methodology
A [multivariate probit model](https://en.wikipedia.org/wiki/Multivariate_probit_model) is used to predict the probability of all potential effects and aromas simultaneously given lab results for a sample or samples. Specific effects and aromas are predicted to be reported when the estimated probability of an effect or aroma crosses a threshold. The thresholds are set to best fit the observed occurrence of each effect and aroma. Below are the variates used in the models estimated.
```json
{
"full": [
"cbc",
"cbd",
"cbda",
"cbg",
"cbga",
"cbn",
"delta_8_thc",
"delta_9_thc",
"thca",
"thcv",
"alpha_bisabolol",
"alpha_pinene",
"alpha_terpinene",
"beta_caryophyllene",
"beta_myrcene",
"beta_pinene",
"camphene",
"carene",
"caryophyllene_oxide",
"d_limonene",
"eucalyptol",
"gamma_terpinene",
"geraniol",
"guaiol",
"humulene",
"isopulegol",
"linalool",
"nerolidol",
"ocimene",
"p_cymene",
"terpinene",
"terpinolene"
],
"terpene_only": [
"alpha_bisabolol",
"alpha_pinene",
"alpha_terpinene",
"beta_caryophyllene",
"beta_myrcene",
"beta_pinene",
"camphene",
"carene",
"caryophyllene_oxide",
"d_limonene",
"eucalyptol",
"gamma_terpinene",
"geraniol",
"guaiol",
"humulene",
"isopulegol",
"linalool",
"nerolidol",
"ocimene",
"p_cymene",
"terpinene",
"terpinolene"
],
"cannabinoid_only": [
"cbc",
"cbd",
"cbda",
"cbg",
"cbga",
"cbn",
"delta_8_thc",
"delta_9_thc",
"thca",
"thcv"
],
"totals": ["total_cbd", "total_thc", "total_terpenes"],
"simple": ["total_cbd", "total_thc"]
}
```
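To make the thresholding step concrete, here is a minimal sketch; the probabilities, thresholds, and outcome names below are made up purely for illustration:

```python
import numpy as np

# Hypothetical predicted probabilities for three outcomes (effects/aromas)
# for two samples, plus per-outcome thresholds fit to observed occurrence rates.
probs = np.array([[0.62, 0.10, 0.45],
                  [0.20, 0.55, 0.70]])
thresholds = np.array([0.50, 0.40, 0.60])
outcomes = ["relaxed", "citrus", "skunk"]  # illustrative names only

# An outcome is predicted to be reported when its probability crosses its threshold.
predicted = probs >= thresholds
for row in predicted:
    print([name for name, hit in zip(outcomes, row) if hit])
```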
## Results
An implementation of the prediction model can be found at <https://cannlytics.com/effects> and utilized through the API endpoint <https://cannlytics.com/api/stats/effects>. In general, there are 3 main actions:
1. You can use the model to predict potentially reported effects and aromas for any cannabis flower for which you have lab results. Simply post your lab results to the `/stats/effects` endpoint, specifying your model if you desire, and you will receive effect and aroma predictions.
2. You can get the model statistics by making a `GET` request to `/stats/effects`. Currently, the model statistics include `false_positive_rate`, `false_negative_rate`, `true_positive_rate`, `true_negative_rate`, `accuracy`, and `informedness`.
3. Finally, you can post the actual effects and aromas that you may observe with the `/stats/effects/actual` endpoint.
You can substitute training data, for strain reviews or lab results, as you see fit. Please see the API documentation for more information about using this API endpoint.
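As an illustration only, a request could look something like the following; the payload schema here is an assumption, so consult the API documentation for the authoritative field names:

```python
import requests

# Hypothetical lab-result payload; the real field names should follow the API docs.
payload = {"model": "full", "samples": [{"total_thc": 20.1, "total_cbd": 0.3, "beta_myrcene": 0.4}]}
predictions = requests.post("https://cannlytics.com/api/stats/effects", json=payload).json()

# Model statistics (accuracy, informedness, etc.) are available with a GET request.
stats = requests.get("https://cannlytics.com/api/stats/effects").json()
```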
## Insights and future work
The more training data the better. If you want to [contribute lab results or reviews](https://cannlytics.com/stats/effects), then you are welcome! You can also use your own training data. Using the model to predict out-of-sample helps make the model robust. Please feel free to report your use of the model and its accuracy in the wild to <[email protected]>. Lastly, but most importantly, remember that the predictions are for the probability of effects and aromas being reported by the observed sample given observed lab results. Extrapolations beyond the ranges of observed values aren't valid and all statistics should be taken at face value. Thank you and good fortune!
## Disclaimer
```
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
```
|
ArtemisZealot/DialoGTP-small-Qkarin | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": 1000
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 9 | 2022-07-14T21:02:40Z | ---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- accuracy
model-index:
- name: roberta-base-cv-studio_name-medium
results: []
widget:
- text: "Egresado de la carrera Ingenierรญa en Computaciรณn Conocimientos de lenguajes HTML, CSS, Javascript y MySQL. Experiencia trabajando en รกmbitos de redes de pequeรฑa y mediana escala. Inglรฉs Hablado nivel bรกsico, escrito nivel intermedio.HTML, CSS y JavaScript. Realidad aumentada. Lenguaje R. HTML5, JavaScript y Nodejs"
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# bert-base-cased-cv-studio_name-medium
This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the None dataset.
## Model description
Predicts a studio name based on a CV text
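A quick sketch of how the fine-tuned checkpoint could be queried with the `text-classification` pipeline (the repository id below is a placeholder, not the actual Hub id):

```python
from transformers import pipeline

# Placeholder repository id; substitute the actual Hub id of this model.
classifier = pipeline("text-classification", model="<user>/roberta-base-cv-studio_name-medium")
print(classifier("Egresado de la carrera Ingeniería en Computación. Conocimientos de HTML, CSS y JavaScript."))
```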
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 20
- num_epochs: 10
### Framework versions
- Transformers 4.19.0
- Pytorch 1.8.2+cu111
- Datasets 1.6.2
- Tokenizers 0.12.1
|
Ashagi/Ashvx | [] | null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
license: apache-2.0
tags:
- paraphrasing
- generated_from_trainer
model-index:
- name: t5-paraphrase-paws-msrp-opinosis-finetuned-parasci
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# t5-paraphrase-paws-msrp-opinosis-finetuned-parasci
This model is a fine-tuned version of [ceshine/t5-paraphrase-paws-msrp-opinosis](https://huggingface.co/ceshine/t5-paraphrase-paws-msrp-opinosis) on an unknown dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5.6e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2
- mixed_precision_training: Native AMP
### Framework versions
- Transformers 4.18.0
- Pytorch 1.11.0
- Datasets 2.1.0
- Tokenizers 0.12.1
|
Ashl3y/model_name | [] | null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
tags:
- flair
- token-classification
- sequence-tagger-model
language: en
datasets:
- conll2003
widget:
- text: "George Washington went to Washington"
---
This is a very small model I use for testing my [ner eval dashboard](https://github.com/helpmefindaname/ner-eval-dashboard)
F1-Score: **48.73** (CoNLL-03)
Predicts 4 tags:
| **tag** | **meaning** |
|---------------------------------|-----------|
| PER | person name |
| LOC | location name |
| ORG | organization name |
| MISC | other name |
Based on huggingface minimal testing embeddings
---
### Demo: How to use in Flair
Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`)
```python
from flair.data import Sentence
from flair.models import SequenceTagger
# load tagger
tagger = SequenceTagger.load("helpmefindaname/mini-sequence-tagger-conll03")
# make example sentence
sentence = Sentence("George Washington went to Washington")
# predict NER tags
tagger.predict(sentence)
# print sentence
print(sentence)
# print predicted NER spans
print('The following NER tags are found:')
# iterate over entities and print
for entity in sentence.get_spans('ner'):
print(entity)
```
This yields the following output:
```
Span [1,2]: "George Washington" [โ Labels: PER (1.0)]
Span [5]: "Washington" [โ Labels: LOC (1.0)]
```
So, the entities "*George Washington*" (labeled as a **person**) and "*Washington*" (labeled as a **location**) are found in the sentence "*George Washington went to Washington*".
---
### Training: Script to train this model
The following command was used to train this model, where `examples\ner\run_ner.py` refers to [this script](https://github.com/flairNLP/flair/blob/master/examples/ner/run_ner.py):
```
python examples\ner\run_ner.py --model_name_or_path hf-internal-testing/tiny-random-bert --dataset_name CONLL_03 --learning_rate 0.002 --mini_batch_chunk_size 1024 --batch_size 64 --num_epochs 100
```
--- |
Augustvember/WokkaBot4 | [] | null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
language:
- en
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
model-index:
- name: pixel-base-finetuned-qnli
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE QNLI
type: glue
args: qnli
metrics:
- name: Accuracy
type: accuracy
value: 0.8859600951857953
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# pixel-base-finetuned-qnli
This model is a fine-tuned version of [Team-PIXEL/pixel-base](https://huggingface.co/Team-PIXEL/pixel-base) on the GLUE QNLI dataset.
It achieves the following results on the evaluation set:
- Loss: 0.9503
- Accuracy: 0.8860
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 64
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 100
- training_steps: 15000
- mixed_precision_training: Apex, opt level O1
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:-----:|:---------------:|:--------:|
| 0.5451 | 0.31 | 500 | 0.5379 | 0.7282 |
| 0.4451 | 0.61 | 1000 | 0.3846 | 0.8318 |
| 0.4567 | 0.92 | 1500 | 0.3543 | 0.8525 |
| 0.3558 | 1.22 | 2000 | 0.3294 | 0.8638 |
| 0.3324 | 1.53 | 2500 | 0.3221 | 0.8666 |
| 0.3434 | 1.83 | 3000 | 0.2976 | 0.8774 |
| 0.2573 | 2.14 | 3500 | 0.3193 | 0.8750 |
| 0.2411 | 2.44 | 4000 | 0.3044 | 0.8794 |
| 0.253 | 2.75 | 4500 | 0.2932 | 0.8834 |
| 0.1653 | 3.05 | 5000 | 0.3364 | 0.8841 |
| 0.1662 | 3.36 | 5500 | 0.3348 | 0.8797 |
| 0.1816 | 3.67 | 6000 | 0.3440 | 0.8869 |
| 0.1699 | 3.97 | 6500 | 0.3453 | 0.8845 |
| 0.1027 | 4.28 | 7000 | 0.4277 | 0.8810 |
| 0.0987 | 4.58 | 7500 | 0.4590 | 0.8832 |
| 0.0974 | 4.89 | 8000 | 0.4311 | 0.8783 |
| 0.0669 | 5.19 | 8500 | 0.5214 | 0.8819 |
| 0.0583 | 5.5 | 9000 | 0.5776 | 0.8850 |
| 0.065 | 5.8 | 9500 | 0.5646 | 0.8821 |
| 0.0381 | 6.11 | 10000 | 0.6252 | 0.8796 |
| 0.0314 | 6.41 | 10500 | 0.7222 | 0.8801 |
| 0.0453 | 6.72 | 11000 | 0.6951 | 0.8823 |
| 0.0264 | 7.03 | 11500 | 0.7620 | 0.8828 |
| 0.0215 | 7.33 | 12000 | 0.8160 | 0.8834 |
| 0.0176 | 7.64 | 12500 | 0.8583 | 0.8828 |
| 0.0245 | 7.94 | 13000 | 0.8484 | 0.8867 |
| 0.0124 | 8.25 | 13500 | 0.8927 | 0.8836 |
| 0.0112 | 8.55 | 14000 | 0.9368 | 0.8827 |
| 0.0154 | 8.86 | 14500 | 0.9405 | 0.8860 |
| 0.0046 | 9.16 | 15000 | 0.9503 | 0.8860 |
### Framework versions
- Transformers 4.17.0
- Pytorch 1.11.0+cu113
- Datasets 2.0.0
- Tokenizers 0.11.6
|
Augustvember/WokkaBot9 | [] | null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
language:
- en
tags:
- generated_from_trainer
datasets:
- glue
model-index:
- name: pixel-base-finetuned-stsb
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# pixel-base-finetuned-stsb
This model is a fine-tuned version of [Team-PIXEL/pixel-base](https://huggingface.co/Team-PIXEL/pixel-base) on the GLUE STSB dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 64
- eval_batch_size: 8
- seed: 2
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 100
- training_steps: 15000
- mixed_precision_training: Apex, opt level O1
### Framework versions
- Transformers 4.17.0
- Pytorch 1.11.0
- Datasets 2.0.0
- Tokenizers 0.12.1
|
Augustvember/WokkaBot99 | [] | null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
language:
- en
tags:
- generated_from_trainer
datasets:
- glue
model-index:
- name: pixel-base-finetuned-wnli
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# pixel-base-finetuned-wnli
This model is a fine-tuned version of [Team-PIXEL/pixel-base](https://huggingface.co/Team-PIXEL/pixel-base) on the GLUE WNLI dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 64
- eval_batch_size: 8
- seed: 3
- gradient_accumulation_steps: 4
- total_train_batch_size: 256
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- training_steps: 400
- mixed_precision_training: Apex, opt level O1
### Framework versions
- Transformers 4.17.0
- Pytorch 1.11.0
- Datasets 2.0.0
- Tokenizers 0.12.1
|
Ayham/distilbert_roberta_summarization_cnn_dailymail | [
"pytorch",
"tensorboard",
"encoder-decoder",
"text2text-generation",
"dataset:cnn_dailymail",
"transformers",
"generated_from_trainer",
"autotrain_compatible"
] | text2text-generation | {
"architectures": [
"EncoderDecoderModel"
],
"model_type": "encoder-decoder",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 14 | 2022-07-15T07:42:23Z | ---
license: apache-2.0
library_name: sklearn
tags:
- tabular-classification
- baseline-trainer
---
## Baseline Model trained on irisg444_4c0 to apply classification on Species
**Metrics of the best model** (`LogisticRegression(class_weight='balanced', max_iter=1000)`):

| Metric | Value |
|-----------------|----------|
| accuracy | 0.953333 |
| recall_macro | 0.953333 |
| precision_macro | 0.956229 |
| f1_macro | 0.953216 |
**The fitted pipeline (rendered as the model plot on the Hub) consists of:**

- `easypreprocessor`: `EasyPreprocessor`, which treats `SepalLengthCm`, `SepalWidthCm`, `PetalLengthCm`, and `PetalWidthCm` as continuous features
- `logisticregression`: `LogisticRegression(C=1, class_weight='balanced', max_iter=1000)`
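As a rough sketch of what this baseline amounts to (the dataset loading step and file name are assumptions based on the pipeline above; the dabl `EasyPreprocessor` is omitted since all features are continuous):

```python
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

# Hypothetical CSV export of the irisg444_4c0 dataset.
df = pd.read_csv("irisg444_4c0.csv")
X = df[["SepalLengthCm", "SepalWidthCm", "PetalLengthCm", "PetalWidthCm"]]
y = df["Species"]

clf = LogisticRegression(C=1, class_weight="balanced", max_iter=1000)
print(cross_val_score(clf, X, y, scoring="accuracy").mean())
```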
**Disclaimer:** This model is trained with the dabl library as a baseline; for better results, use [AutoTrain](https://huggingface.co/autotrain).
**Logs of training**, including the models tried in the process, can be found in logs.txt |
Ayham/roberta_bert_summarization_cnn_dailymail | [
"pytorch",
"tensorboard",
"encoder-decoder",
"text2text-generation",
"dataset:cnn_dailymail",
"transformers",
"generated_from_trainer",
"autotrain_compatible"
] | text2text-generation | {
"architectures": [
"EncoderDecoderModel"
],
"model_type": "encoder-decoder",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 12 | 2022-07-15T08:09:49Z | ---
license: mit
tags:
- generated_from_trainer
datasets:
- xtreme
metrics:
- f1
model-index:
- name: xlm-roberta-base-finetuned-panx-de
results:
- task:
name: Token Classification
type: token-classification
dataset:
name: xtreme
type: xtreme
args: PAN-X.de
metrics:
- name: F1
type: f1
value: 0.8648740833380706
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# xlm-roberta-base-finetuned-panx-de
This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1365
- F1: 0.8649
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.2553 | 1.0 | 525 | 0.1575 | 0.8279 |
| 0.1284 | 2.0 | 1050 | 0.1386 | 0.8463 |
| 0.0813 | 3.0 | 1575 | 0.1365 | 0.8649 |
### Framework versions
- Transformers 4.11.3
- Pytorch 1.12.0+cu113
- Datasets 1.16.1
- Tokenizers 0.10.3
|
Aymene/opus-mt-en-ro-finetuned-en-to-ro | [] | null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | MC-BERT is a novel conceptualized representation learning approach for the medical domain. First, we use a different mask generation procedure to mask spans of tokens, rather than only random ones. We also introduce two kinds of masking strategies, namely whole entity masking and whole span masking. Finally, MC-BERT split the input document into segments based on the actual "sentences" provided by the user as positive samples and sample random sentences from other documents as negative samples for the next sentence prediction.

More detail:
https://github.com/alibaba-research/ChineseBLUE |
Ayoola/wav2vec2-large-xlsr-turkish-demo-colab | [] | null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
license: mit
tags:
- generated_from_keras_callback
model-index:
- name: xtremedistil-l6-h256-uncased-future-time-references-D1
results: []
datasets:
- jonaskoenig/trump_administration_statement
- jonaskoenig/future-time-references-static-filter-D1
---
<!-- This model card has been generated automatically according to the information Keras had access to. You should
probably proofread and complete it, then remove this comment. -->
# xtremedistil-l6-h256-uncased-future-time-references-D1
This model is a fine-tuned version of [microsoft/xtremedistil-l6-h256-uncased](https://huggingface.co/microsoft/xtremedistil-l6-h256-uncased) on the [jonaskoenig/trump_administration_statement](https://huggingface.co/datasets/jonaskoenig/trump_administration_statement) and [jonaskoenig/future-time-refernces-static-filter](https://huggingface.co/datasets/jonaskoenig/future-time-refernces-static-filter) datasets.
It achieves the following results on the evaluation set:
- Train Loss: 0.0099
- Train Sparse Categorical Accuracy: 0.9977
- Validation Loss: 0.0128
- Validation Sparse Categorical Accuracy: 0.9976
- Epoch: 3
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- optimizer: {'name': 'Adam', 'learning_rate': 5e-05, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False}
- training_precision: float32
### Training results
| Train Loss | Train Sparse Categorical Accuracy | Validation Loss | Validation Sparse Categorical Accuracy | Epoch |
|:----------:|:---------------------------------:|:---------------:|:--------------------------------------:|:-----:|
| 0.0276 | 0.9932 | 0.0156 | 0.9968 | 0 |
| 0.0138 | 0.9969 | 0.0125 | 0.9972 | 1 |
| 0.0117 | 0.9974 | 0.0126 | 0.9974 | 2 |
| 0.0099 | 0.9977 | 0.0128 | 0.9976 | 3 |
The test accuracy is: 99.77%
### Framework versions
- Transformers 4.20.1
- TensorFlow 2.9.1
- Datasets 2.3.2
- Tokenizers 0.12.1
|
Ayran/DialoGPT-medium-harry-1 | [] | null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | 2022-07-15T11:30:35Z | ---
language: en
tags:
- btcv
- medical
- swin
license: apache-2.0
datasets:
- BTCV
---
# Model Overview
This repository contains the code for Swin UNETR [1,2]. Swin UNETR achieves state-of-the-art results on the Medical Segmentation
Decathlon (MSD) and the Beyond the Cranial Vault (BTCV) Segmentation Challenge datasets. In [1], a novel methodology is devised for pre-training the Swin UNETR backbone in a self-supervised
manner. We provide the option of training Swin UNETR by fine-tuning from pre-trained self-supervised weights or from scratch.
The source repository for the training of these models can be found [here](https://github.com/Project-MONAI/research-contributions/tree/main/SwinUNETR/BTCV).
# Installing Dependencies
Dependencies for training and inference can be installed using the model requirements file:
``` bash
pip install -r requirements.txt
```
# Intended uses & limitations
You can use the raw model for DICOM segmentation, but it's mostly intended to be fine-tuned on a downstream task.
Note that this model is primarily aimed at being fine-tuned on tasks which segment CT scans or MRIs stored in DICOM format. DICOM metadata mostly differs across medical facilities, so if applying the model to a new dataset, it should be fine-tuned.
# How to use
To install necessary dependencies, run the below in bash.
```
git clone https://github.com/darraghdog/Project-MONAI-research-contributions pmrc
pip install -r pmrc/requirements.txt
cd pmrc/SwinUNETR/BTCV
```
To load the model from the hub.
```
>>> from swinunetr import SwinUnetrModelForInference
>>> model = SwinUnetrModelForInference.from_pretrained('darragh/swinunetr-btcv-tiny')
```
# Limitations and bias
The training data used for this model is specific to CT scans from certain health facilities and machines. Data from other facilities may differ in image distributions and may require fine-tuning of the models for best performance.
# Evaluation results
We provide several models trained on the BTCV dataset in the table below.
<table>
<tr>
<th>Name</th>
<th>Dice (overlap=0.7)</th>
<th>Dice (overlap=0.5)</th>
<th>Feature Size</th>
<th># params (M)</th>
<th>Self-Supervised Pre-trained </th>
</tr>
<tr>
<td>Swin UNETR/Base</td>
<td>82.25</td>
<td>81.86</td>
<td>48</td>
<td>62.1</td>
<td>Yes</td>
</tr>
<tr>
<td>Swin UNETR/Small</td>
<td>79.79</td>
<td>79.34</td>
<td>24</td>
<td>15.7</td>
<td>No</td>
</tr>
<tr>
<td>Swin UNETR/Tiny</td>
<td>72.05</td>
<td>70.35</td>
<td>12</td>
<td>4.0</td>
<td>No</td>
</tr>
</table>
# Data Preparation

The training data is from the [BTCV challenge dataset](https://www.synapse.org/#!Synapse:syn3193805/wiki/217752).
- Target: 13 abdominal organs including 1. Spleen 2. Right Kidney 3. Left Kidney 4. Gallbladder 5. Esophagus 6. Liver 7. Stomach 8. Aorta 9. IVC 10. Portal and Splenic Veins 11. Pancreas 12. Right adrenal gland 13. Left adrenal gland.
- Task: Segmentation
- Modality: CT
- Size: 30 3D volumes (24 Training + 6 Testing)
# Training
See the source repository [here](https://github.com/Project-MONAI/research-contributions/tree/main/SwinUNETR/BTCV) for information on training.
# BibTeX entry and citation info
If you find this repository useful, please consider citing the following papers:
```
@inproceedings{tang2022self,
title={Self-supervised pre-training of swin transformers for 3d medical image analysis},
author={Tang, Yucheng and Yang, Dong and Li, Wenqi and Roth, Holger R and Landman, Bennett and Xu, Daguang and Nath, Vishwesh and Hatamizadeh, Ali},
booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
pages={20730--20740},
year={2022}
}
@article{hatamizadeh2022swin,
title={Swin UNETR: Swin Transformers for Semantic Segmentation of Brain Tumors in MRI Images},
author={Hatamizadeh, Ali and Nath, Vishwesh and Tang, Yucheng and Yang, Dong and Roth, Holger and Xu, Daguang},
journal={arXiv preprint arXiv:2201.01266},
year={2022}
}
```
# References
[1]: Tang, Y., Yang, D., Li, W., Roth, H.R., Landman, B., Xu, D., Nath, V. and Hatamizadeh, A., 2022. Self-supervised pre-training of swin transformers for 3d medical image analysis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (pp. 20730-20740).
[2]: Hatamizadeh, A., Nath, V., Tang, Y., Yang, D., Roth, H. and Xu, D., 2022. Swin UNETR: Swin Transformers for Semantic Segmentation of Brain Tumors in MRI Images. arXiv preprint arXiv:2201.01266.
|
Ayran/DialoGPT-medium-harry-potter-1-through-4-plus-6 | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": 1000
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 12 | null | ---
tags:
- FrozenLake-v1-4x4-no_slippery
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-FrozenLake-v1-4x4-noSlippery
results:
- metrics:
- type: mean_reward
value: 1.00 +/- 0.00
name: mean_reward
task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: FrozenLake-v1-4x4-no_slippery
type: FrozenLake-v1-4x4-no_slippery
---
# **Q-Learning** Agent playing **FrozenLake-v1**
This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1** .
## Usage
```python
import gym

# `load_from_hub` and `evaluate_agent` are helper functions defined in the
# Deep RL Class notebook for this unit (they are not part of a pip package).
model = load_from_hub(repo_id="spacestar1705/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")
# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"])
```
|
Azaghast/GPT2-SCP-Descriptions | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 5 | 2022-07-15T13:27:33Z | ---
language: "pt"
widget:
- text: "O paciente recebeu no hospital e falou com a mรฉdica"
datasets:
- MacMorpho
---
# Postagger Bio Portuguese
## Citation
```
coming soon
```
|
BSC-LT/roberta-base-ca | [
"pytorch",
"roberta",
"fill-mask",
"ca",
"transformers",
"masked-lm",
"BERTa",
"catalan",
"license:apache-2.0",
"autotrain_compatible"
] | fill-mask | {
"architectures": [
"RobertaForMaskedLM"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 18 | null | ---
tags:
- generated_from_trainer
model-index:
- name: mbart-large-50-finetuned-opus-en-pt-translation-finetuned-english-to-portuguese-handmade-dataset
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# mbart-large-50-finetuned-opus-en-pt-translation-finetuned-english-to-portuguese-handmade-dataset
This model is a fine-tuned version of [Narrativa/mbart-large-50-finetuned-opus-en-pt-translation](https://huggingface.co/Narrativa/mbart-large-50-finetuned-opus-en-pt-translation) on an unknown dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
### Training results
| Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|
| No log | 1.0 | 22 | 0.8052 | 64.2749 | 11.9231 |
### Framework versions
- Transformers 4.20.1
- Pytorch 1.12.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
|
Backedman/DialoGPT-small-Anika | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": 1000
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 6 | 2022-07-15T17:28:35Z | ---
license: apache-2.0
tags:
- pytorch
- diffusers
- unconditional-image-generation
---
# Latent Diffusion Models (LDM)
**Paper**: [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752)
**Abstract**:
*By decomposing the image formation process into a sequential application of denoising autoencoders, diffusion models (DMs) achieve state-of-the-art synthesis results on image data and beyond. Additionally, their formulation allows for a guiding mechanism to control the image generation process without retraining. However, since these models typically operate directly in pixel space, optimization of powerful DMs often consumes hundreds of GPU days and inference is expensive due to sequential evaluations. To enable DM training on limited computational resources while retaining their quality and flexibility, we apply them in the latent space of powerful pretrained autoencoders. In contrast to previous work, training diffusion models on such a representation allows for the first time to reach a near-optimal point between complexity reduction and detail preservation, greatly boosting visual fidelity. By introducing cross-attention layers into the model architecture, we turn diffusion models into powerful and flexible generators for general conditioning inputs such as text or bounding boxes and high-resolution synthesis becomes possible in a convolutional manner. Our latent diffusion models (LDMs) achieve a new state of the art for image inpainting and highly competitive performance on various tasks, including unconditional image generation, semantic scene synthesis, and super-resolution, while significantly reducing computational requirements compared to pixel-based DMs.*
**Authors**
*Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, Björn Ommer*
## Usage
### Inference with a pipeline
```python
!pip install diffusers
from diffusers import DiffusionPipeline
model_id = "CompVis/ldm-celebahq-256"
# load model and scheduler
pipeline = DiffusionPipeline.from_pretrained(model_id)
# run pipeline in inference (sample random noise and denoise)
image = pipeline(num_inference_steps=200)["sample"]
# save image
image[0].save("ldm_generated_image.png")
```
### Inference with an unrolled loop
```python
!pip install diffusers
from diffusers import UNet2DModel, DDIMScheduler, VQModel
import torch
import PIL.Image
import numpy as np
import tqdm
seed = 3
# load all models
unet = UNet2DModel.from_pretrained("CompVis/ldm-celebahq-256", subfolder="unet")
vqvae = VQModel.from_pretrained("CompVis/ldm-celebahq-256", subfolder="vqvae")
scheduler = DDIMScheduler.from_config("CompVis/ldm-celebahq-256", subfolder="scheduler")
# set to cuda
torch_device = "cuda" if torch.cuda.is_available() else "cpu"
unet.to(torch_device)
vqvae.to(torch_device)
# generate gaussian noise to be decoded
generator = torch.manual_seed(seed)
noise = torch.randn(
(1, unet.in_channels, unet.sample_size, unet.sample_size),
generator=generator,
).to(torch_device)
# set inference steps for DDIM
scheduler.set_timesteps(num_inference_steps=200)
image = noise
for t in tqdm.tqdm(scheduler.timesteps):
# predict noise residual of previous image
with torch.no_grad():
residual = unet(image, t)["sample"]
# compute previous image x_t according to DDIM formula
prev_image = scheduler.step(residual, t, image, eta=0.0)["prev_sample"]
# x_t-1 -> x_t
image = prev_image
# decode image with vae
with torch.no_grad():
image = vqvae.decode(image)
# process image
image_processed = image.cpu().permute(0, 2, 3, 1)
image_processed = (image_processed + 1.0) * 127.5
image_processed = image_processed.clamp(0, 255).numpy().astype(np.uint8)
image_pil = PIL.Image.fromarray(image_processed[0])
image_pil.save(f"generated_image_{seed}.png")
```
## Samples
1. 
2. 
3. 
4. 
|
Badr/model1 | [] | null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | 2022-07-15T17:31:56Z | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: distilbert-base-uncased-finetuned-squad
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-uncased-finetuned-squad
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the squad dataset.
It achieves the following results on the evaluation set:
- Loss: 1.1596
## Model description
More information needed
## Intended uses & limitations
More information needed
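As an illustrative usage sketch (not part of the original card), the checkpoint can be served through the `question-answering` pipeline; the model path below is a placeholder for this repository's Hub id or a local directory.
```python
# Hedged usage sketch -- "path/to/distilbert-base-uncased-finetuned-squad" is a placeholder.
from transformers import pipeline
qa = pipeline("question-answering", model="path/to/distilbert-base-uncased-finetuned-squad")
result = qa(
    question="Which dataset was used for fine-tuning?",
    context="The model is a fine-tuned version of distilbert-base-uncased on the SQuAD dataset.",
)
print(result["answer"], result["score"])
```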
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 1.2265 | 1.0 | 5533 | 1.1572 |
| 0.9548 | 2.0 | 11066 | 1.1278 |
| 0.7396 | 3.0 | 16599 | 1.1596 |
### Framework versions
- Transformers 4.20.1
- Pytorch 1.11.0
- Datasets 2.3.2
- Tokenizers 0.12.1
|
Bagus/ser-japanese | [] | null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | 2022-07-15T17:35:30Z | ---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
results:
- metrics:
- type: mean_reward
value: 239.43 +/- 17.03
name: mean_reward
task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: LunarLander-v2
type: LunarLander-v2
---
# **PPO** Agent playing **LunarLander-v2**
This is a trained model of a **PPO** agent playing **LunarLander-v2**
using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).
## Usage (with Stable-baselines3)
TODO: Add your code
```python
from stable_baselines3 import ...
from huggingface_sb3 import load_from_hub
...
```
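For instance, a minimal evaluation sketch with `huggingface_sb3` could look as follows; the repo id and filename are placeholders, not values taken from this card.
```python
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy
# Placeholder repo id / filename -- substitute the ones for this model.
checkpoint = load_from_hub(repo_id="<user>/<repo>", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)
env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10, deterministic=True)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```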
|
Bagus/wav2vec2-xlsr-greek-speech-emotion-recognition | [
"pytorch",
"tensorboard",
"wav2vec2",
"el",
"dataset:aesdd",
"transformers",
"audio",
"audio-classification",
"speech",
"license:apache-2.0"
] | audio-classification | {
"architectures": [
"Wav2Vec2ForSpeechClassification"
],
"model_type": "wav2vec2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 21 | 2022-07-15T17:51:30Z | ---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: t5-base-finetuned-emo20q
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# t5-base-finetuned-emo20q
This model is a fine-tuned version of [t5-base](https://huggingface.co/t5-base) on the None dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
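The card does not document the input format used for emo20q, so the following is only a generic seq2seq inference sketch; the checkpoint path and the example prompt are assumptions.
```python
# Generic seq2seq inference sketch -- path and prompt are placeholders, not from this card.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
model_path = "path/to/t5-base-finetuned-emo20q"  # placeholder
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForSeq2SeqLM.from_pretrained(model_path)
inputs = tokenizer("Is it a pleasant emotion?", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=16)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```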
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:------:|:-------:|:---------:|:-------:|
| No log | 1.0 | 280 | 2.0507 | 58.2896 | 0.0 | 58.1047 | 58.2444 | 2.0 |
### Framework versions
- Transformers 4.20.1
- Pytorch 1.12.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
|
Bakkes/BakkesModWiki | [] | null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | 2022-07-15T18:21:06Z | ---
tags:
- text-to-speech
- gronings
- FastSpeech 2
language: gos
datasets:
- gronings
license: afl-3.0
---
## GroTTS Model
This model was trained with the [FastSpeech 2](https://arxiv.org/abs/2006.04558) architecture using approx. 2 hours of a Gronings TTS dataset. For the best results, download the vocoder separately from [here](https://huggingface.co/ahnafsamin/parallelwavegan-gronings) and then use the following code:
```
from espnet2.bin.tts_inference import Text2Speech
from scipy.io.wavfile import write
model = Text2Speech.from_pretrained(
model_file="path_to_the_model_file_in_pth_format",
vocoder_file="path_to_the_vocoder_file_in_pkl_format"
)
output = model("This is a simple test.")
write("x.wav", 22050, output['wav'].numpy())
```
The GroTTS model is deployed [here](https://huggingface.co/spaces/ahnafsamin/GroTTS-FastSpeech2).
## TTS config
<details><summary>expand</summary>
```
config: conf/tuning/train_fastspeech2.yaml
print_config: false
log_level: INFO
dry_run: false
iterator_type: sequence
output_dir: exp/tts_train_fastspeech2_raw_char_tacotron
ngpu: 1
seed: 0
num_workers: 1
num_att_plot: 3
dist_backend: nccl
dist_init_method: env://
dist_world_size: null
dist_rank: null
local_rank: 0
dist_master_addr: null
dist_master_port: null
dist_launcher: null
multiprocessing_distributed: false
unused_parameters: false
sharded_ddp: false
cudnn_enabled: true
cudnn_benchmark: false
cudnn_deterministic: true
collect_stats: false
write_collected_feats: false
max_epoch: 1000
patience: null
val_scheduler_criterion:
- valid
- loss
early_stopping_criterion:
- valid
- loss
- min
best_model_criterion:
- - valid
- loss
- min
- - train
- loss
- min
keep_nbest_models: 5
nbest_averaging_interval: 0
grad_clip: 1.0
grad_clip_type: 2.0
grad_noise: false
accum_grad: 8
no_forward_run: false
resume: true
train_dtype: float32
use_amp: false
log_interval: null
use_matplotlib: true
use_tensorboard: true
use_wandb: false
wandb_project: null
wandb_id: null
wandb_entity: null
wandb_name: null
wandb_model_log_interval: -1
detect_anomaly: false
pretrain_path: null
init_param: []
ignore_init_mismatch: false
freeze_param: []
num_iters_per_epoch: 800
batch_size: 20
valid_batch_size: null
batch_bins: 3000000
valid_batch_bins: null
train_shape_file:
- exp/tts_train_raw_char_tacotron/decode_use_teacher_forcingtrue_train.loss.ave/stats/train/text_shape.char
- exp/tts_train_raw_char_tacotron/decode_use_teacher_forcingtrue_train.loss.ave/stats/train/speech_shape
valid_shape_file:
- exp/tts_train_raw_char_tacotron/decode_use_teacher_forcingtrue_train.loss.ave/stats/valid/text_shape.char
- exp/tts_train_raw_char_tacotron/decode_use_teacher_forcingtrue_train.loss.ave/stats/valid/speech_shape
batch_type: numel
valid_batch_type: null
fold_length:
- 150
- 204800
sort_in_batch: descending
sort_batch: descending
multiple_iterator: false
chunk_length: 500
chunk_shift_ratio: 0.5
num_cache_chunks: 1024
train_data_path_and_name_and_type:
- - dump/raw/tr_no_dev/text
- text
- text
- - exp/tts_train_raw_char_tacotron/decode_use_teacher_forcingtrue_train.loss.ave/tr_no_dev/durations
- durations
- text_int
- - dump/raw/tr_no_dev/wav.scp
- speech
- sound
- - exp/tts_train_raw_char_tacotron/decode_use_teacher_forcingtrue_train.loss.ave/stats/train/collect_feats/pitch.scp
- pitch
- npy
- - exp/tts_train_raw_char_tacotron/decode_use_teacher_forcingtrue_train.loss.ave/stats/train/collect_feats/energy.scp
- energy
- npy
valid_data_path_and_name_and_type:
- - dump/raw/dev/text
- text
- text
- - exp/tts_train_raw_char_tacotron/decode_use_teacher_forcingtrue_train.loss.ave/dev/durations
- durations
- text_int
- - dump/raw/dev/wav.scp
- speech
- sound
- - exp/tts_train_raw_char_tacotron/decode_use_teacher_forcingtrue_train.loss.ave/stats/valid/collect_feats/pitch.scp
- pitch
- npy
- - exp/tts_train_raw_char_tacotron/decode_use_teacher_forcingtrue_train.loss.ave/stats/valid/collect_feats/energy.scp
- energy
- npy
allow_variable_data_keys: false
max_cache_size: 0.0
max_cache_fd: 32
valid_max_cache_size: null
optim: adam
optim_conf:
lr: 1.0
scheduler: noamlr
scheduler_conf:
model_size: 384
warmup_steps: 4000
token_list:
- <blank>
- <unk>
- <space>
- E
- N
- A
- O
- T
- I
- R
- D
- L
- S
- K
- M
- G
- U
- H
- .
- W
- V
- Z
- P
- B
- ','
- J
- C
- F
- '?'
- ''''
- '!'
- Y
- X
- '`'
- <sos/eos>
odim: null
model_conf: {}
use_preprocessor: true
token_type: char
bpemodel: null
non_linguistic_symbols: null
cleaner: tacotron
g2p: g2p_en
feats_extract: fbank
feats_extract_conf:
n_fft: 1024
hop_length: 256
win_length: null
fs: 22050
fmin: 80
fmax: 7600
n_mels: 80
normalize: global_mvn
normalize_conf:
stats_file: exp/tts_train_raw_char_tacotron/decode_use_teacher_forcingtrue_train.loss.ave/stats/train/feats_stats.npz
tts: fastspeech2
tts_conf:
adim: 384
aheads: 2
elayers: 4
eunits: 1536
dlayers: 4
dunits: 1536
positionwise_layer_type: conv1d
positionwise_conv_kernel_size: 3
duration_predictor_layers: 2
duration_predictor_chans: 256
duration_predictor_kernel_size: 3
postnet_layers: 5
postnet_filts: 5
postnet_chans: 256
use_masking: true
use_scaled_pos_enc: true
encoder_normalize_before: true
decoder_normalize_before: true
reduction_factor: 1
init_type: xavier_uniform
init_enc_alpha: 1.0
init_dec_alpha: 1.0
transformer_enc_dropout_rate: 0.2
transformer_enc_positional_dropout_rate: 0.2
transformer_enc_attn_dropout_rate: 0.2
transformer_dec_dropout_rate: 0.2
transformer_dec_positional_dropout_rate: 0.2
transformer_dec_attn_dropout_rate: 0.2
pitch_predictor_layers: 5
pitch_predictor_chans: 256
pitch_predictor_kernel_size: 5
pitch_predictor_dropout: 0.5
pitch_embed_kernel_size: 1
pitch_embed_dropout: 0.0
stop_gradient_from_pitch_predictor: true
energy_predictor_layers: 2
energy_predictor_chans: 256
energy_predictor_kernel_size: 3
energy_predictor_dropout: 0.5
energy_embed_kernel_size: 1
energy_embed_dropout: 0.0
stop_gradient_from_energy_predictor: false
pitch_extract: dio
pitch_extract_conf:
fs: 22050
n_fft: 1024
hop_length: 256
f0max: 400
f0min: 80
reduction_factor: 1
pitch_normalize: global_mvn
pitch_normalize_conf:
stats_file: exp/tts_train_raw_char_tacotron/decode_use_teacher_forcingtrue_train.loss.ave/stats/train/pitch_stats.npz
energy_extract: energy
energy_extract_conf:
fs: 22050
n_fft: 1024
hop_length: 256
win_length: null
reduction_factor: 1
energy_normalize: global_mvn
energy_normalize_conf:
stats_file: exp/tts_train_raw_char_tacotron/decode_use_teacher_forcingtrue_train.loss.ave/stats/train/energy_stats.npz
required:
- output_dir
- token_list
version: 0.10.7a1
distributed: false
```
</details> |
Banshee/dialoGPT-small-luke | [] | null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
license: apache-2.0
tags:
- image-classification
- generated_from_trainer
datasets:
- imagefolder
metrics:
- accuracy
model-index:
- name: finetuned-indian-food
results:
- task:
name: Image Classification
type: image-classification
dataset:
name: indian_food_images
type: imagefolder
config: default
split: train
args: default
metrics:
- name: Accuracy
type: accuracy
value: 0.9330499468650372
widget:
- src: https://huggingface.co/rajistics/finetuned-indian-food/resolve/main/003.jpg
example_title: fried_rice
- src: https://huggingface.co/rajistics/finetuned-indian-food/resolve/main/126.jpg
example_title: paani_puri
- src: https://huggingface.co/rajistics/finetuned-indian-food/resolve/main/401.jpg
example_title: chapati
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# finetuned-indian-food
This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the indian_food_images dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2632
- Accuracy: 0.9330
## Model description
More information needed
## Intended uses & limitations
More information needed
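As a usage sketch (not part of the original card), the checkpoint referenced by the widget examples above can be queried with the `image-classification` pipeline:
```python
from transformers import pipeline
classifier = pipeline("image-classification", model="rajistics/finetuned-indian-food")
# one of the example images linked in the widget section above
preds = classifier("https://huggingface.co/rajistics/finetuned-indian-food/resolve/main/003.jpg")
print(preds[0])  # top label and score, e.g. fried_rice
```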
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0002
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 4
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 1.1794 | 0.3 | 100 | 0.9208 | 0.8565 |
| 0.6513 | 0.6 | 200 | 0.5410 | 0.8842 |
| 0.5904 | 0.9 | 300 | 0.4978 | 0.8799 |
| 0.4461 | 1.2 | 400 | 0.3669 | 0.9192 |
| 0.5633 | 1.5 | 500 | 0.4340 | 0.8842 |
| 0.2489 | 1.8 | 600 | 0.3355 | 0.9171 |
| 0.3171 | 2.1 | 700 | 0.3286 | 0.9192 |
| 0.3785 | 2.4 | 800 | 0.3232 | 0.9171 |
| 0.2278 | 2.7 | 900 | 0.3338 | 0.9192 |
| 0.0894 | 3.0 | 1000 | 0.2870 | 0.9245 |
| 0.2092 | 3.3 | 1100 | 0.2884 | 0.9288 |
| 0.1466 | 3.6 | 1200 | 0.2673 | 0.9320 |
| 0.1789 | 3.9 | 1300 | 0.2632 | 0.9330 |
### Framework versions
- Transformers 4.21.1
- Pytorch 1.12.0+cu113
- Datasets 2.4.0
- Tokenizers 0.12.1
|
Battlehooks/distilbert-base-uncased-finetuned-squad | [] | null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
language:
- en
library_name: nemo
datasets:
- librispeech_asr
thumbnail: null
tags:
- automatic-speech-recognition
- speech
- audio
- CTC
- Citrinet
- Transformer
- pytorch
- NeMo
- hf-asr-leaderboard
license: cc-by-4.0
widget:
- example_title: Librispeech sample 1
src: https://cdn-media.huggingface.co/speech_samples/sample1.flac
- example_title: Librispeech sample 2
src: https://cdn-media.huggingface.co/speech_samples/sample2.flac
model-index:
- name: stt_en_citrinet_384_ls
results:
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: LibriSpeech (clean)
type: librispeech_asr
config: clean
split: test
args:
language: en
metrics:
- name: Test WER
type: wer
value: 3.2
- task:
type: Automatic Speech Recognition
name: automatic-speech-recognition
dataset:
name: LibriSpeech (other)
type: librispeech_asr
config: other
split: test
args:
language: en
metrics:
- name: Test WER
type: wer
value: 7.9
---
# NVIDIA Citrinet CTC 384 Librispeech (en-US)
<style>
img {
display: inline;
}
</style>
| [](#model-architecture)
| [](#model-architecture)
| [](#datasets)
| [](#deployment-with-nvidia-riva) |
This model transcribes speech in the lower case English alphabet along with spaces and apostrophes.
It is a "small" version of the Citrinet-CTC model (around 21M parameters).
See the [model architecture](#model-architecture) section and [NeMo documentation](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/models.html#citrinet) for complete architecture details.
It is also compatible with NVIDIA Riva for [production-grade server deployments](#deployment-with-nvidia-riva).
## NVIDIA NeMo: Training
To train, fine-tune or play with the model you will need to install [NVIDIA NeMo](https://github.com/NVIDIA/NeMo). We recommend you install it after you've installed the latest PyTorch version.
```
pip install nemo_toolkit['all']
```
## How to Use this Model
The model is available for use in the NeMo toolkit [3], and can be used as a pre-trained checkpoint for inference or for fine-tuning on another dataset.
### Automatically instantiate the model
```python
import nemo.collections.asr as nemo_asr
asr_model = nemo_asr.models.EncDecCTCModelBPE.from_pretrained("nvidia/stt_en_citrinet_384_ls")
```
### Transcribing using Python
First, let's get a sample
```
wget https://dldata-public.s3.us-east-2.amazonaws.com/2086-149220-0033.wav
```
Then simply do:
```
asr_model.transcribe(['2086-149220-0033.wav'])
```
### Transcribing many audio files
```shell
python [NEMO_GIT_FOLDER]/examples/asr/transcribe_speech.py \
 pretrained_name="nvidia/stt_en_citrinet_384_ls" \
 audio_dir="<DIRECTORY CONTAINING AUDIO FILES>"
```
### Input
This model accepts 16 kHz (16000 Hz) mono-channel audio (wav files) as input.
### Output
This model provides transcribed speech as a string for a given audio sample.
## Model Architecture
Citrinet-CTC is a non-autoregressive variant of the Citrinet model [1] for Automatic Speech Recognition, which uses CTC loss/decoding instead of Transducer loss. You may find more details on this model here: [Citrinet Model](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/models.html).
## Training
The NeMo toolkit [3] was used for training the models for several hundred epochs. These models are trained with this [example script](https://github.com/NVIDIA/NeMo/blob/main/examples/asr/asr_ctc/speech_to_text_ctc_bpe.py) and this [base config](https://github.com/NVIDIA/NeMo/blob/main/examples/asr/conf/citrinet/citrinet_1024.yaml) (note: change `model.model_defaults.filters` to match the model size).
The tokenizers for these models were built using the text transcripts of the train set with this [script](https://github.com/NVIDIA/NeMo/blob/main/scripts/tokenizers/process_asr_text_tokenizer.py).
### Datasets
All the models in this collection are trained on just the LibriSpeech dataset:
- Librispeech 960 hours of English speech
## Performance
The list of the available models in this collection is shown in the following table. Performances of the ASR models are reported in terms of Word Error Rate (WER%) with greedy decoding.
| Version | Tokenizer | Vocabulary Size | LS test-other | LS test-clean |
|---------|---------------------------|-----------------|---------------|---------------|
| 1.0.0 | SentencePiece Unigram [2] | 256 | 7.9 | 3.2 |
## Limitations
Since this model was trained on publicly available speech datasets, the performance of this model might degrade for speech which includes technical terms, or vernacular that the model has not been trained on. The model might also perform worse for accented speech.
## Deployment with NVIDIA Riva
For the best real-time accuracy, latency, and throughput, deploy the model with [NVIDIA Riva](https://developer.nvidia.com/riva), an accelerated speech AI SDK deployable on-prem, in all clouds, multi-cloud, hybrid, at the edge, and embedded.
Additionally, Riva provides:
* World-class out-of-the-box accuracy for the most common languages with model checkpoints trained on proprietary data with hundreds of thousands of GPU-compute hours
* Best in class accuracy with run-time word boosting (e.g., brand and product names) and customization of acoustic model, language model, and inverse text normalization
* Streaming speech recognition, Kubernetes compatible scaling, and Enterprise-grade support
Check out [Riva live demo](https://developer.nvidia.com/riva#demos).
## References
[1] [ Citrinet: Closing the Gap between Non-Autoregressive and Autoregressive End-to-End Models for Automatic Speech Recognition](https://arxiv.org/abs/2104.01721)
[2] [Google Sentencepiece Tokenizer](https://github.com/google/sentencepiece)
[3] [NVIDIA NeMo Toolkit](https://github.com/NVIDIA/NeMo)
## Licence
License to use this model is covered by the [CC-BY-4.0](https://creativecommons.org/licenses/by/4.0/). By downloading the public and release version of the model, you accept the terms and conditions of the [CC-BY-4.0](https://creativecommons.org/licenses/by/4.0/) license. |
BatuhanYilmaz/bert-finetuned-nerxD | [] | null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
language:
- en
library_name: nemo
datasets:
- librispeech_asr
thumbnail: null
tags:
- automatic-speech-recognition
- speech
- audio
- CTC
- Citrinet
- Transformer
- pytorch
- NeMo
- hf-asr-leaderboard
license: cc-by-4.0
widget:
- example_title: Librispeech sample 1
src: https://cdn-media.huggingface.co/speech_samples/sample1.flac
- example_title: Librispeech sample 2
src: https://cdn-media.huggingface.co/speech_samples/sample2.flac
model-index:
- name: stt_en_citrinet_512_ls
results:
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: LibriSpeech (clean)
type: librispeech_asr
config: clean
split: test
args:
language: en
metrics:
- name: Test WER
type: wer
value: 3.1
- task:
type: Automatic Speech Recognition
name: automatic-speech-recognition
dataset:
name: LibriSpeech (other)
type: librispeech_asr
config: other
split: test
args:
language: en
metrics:
- name: Test WER
type: wer
value: 7.8
---
# NVIDIA Citrinet CTC 512 Librispeech (en-US)
<style>
img {
display: inline;
}
</style>
| [](#model-architecture)
| [](#model-architecture)
| [](#datasets)
| [](#deployment-with-nvidia-riva) |
This model transcribes speech in the lower case English alphabet, spaces, and apostrophes.
It is a "medium" version of the Citrinet-CTC model (around 36M parameters).
See the [model architecture](#model-architecture) section and [NeMo documentation](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/models.html#citrinet) for complete architecture details.
It is also compatible with NVIDIA Riva for [production-grade server deployments](#deployment-with-nvidia-riva).
## NVIDIA NeMo: Training
To train, fine-tune or play with the model you will need to install [NVIDIA NeMo](https://github.com/NVIDIA/NeMo). We recommend you install it after you've installed the latest PyTorch version.
```
pip install nemo_toolkit['all']
```
## How to Use this Model
The model is available for use in the NeMo toolkit [3], and can be used as a pre-trained checkpoint for inference or for fine-tuning on another dataset.
### Automatically instantiate the model
```python
import nemo.collections.asr as nemo_asr
asr_model = nemo_asr.models.EncDecCTCModelBPE.from_pretrained("nvidia/stt_en_citrinet_512_ls")
```
### Transcribing using Python
First, let's get a sample
```
wget https://dldata-public.s3.us-east-2.amazonaws.com/2086-149220-0033.wav
```
Then simply do:
```
asr_model.transcribe(['2086-149220-0033.wav'])
```
### Transcribing many audio files
```shell
python [NEMO_GIT_FOLDER]/examples/asr/transcribe_speech.py \
 pretrained_name="nvidia/stt_en_citrinet_512_ls" \
 audio_dir="<DIRECTORY CONTAINING AUDIO FILES>"
```
### Input
This model accepts 16 kHz (16000 Hz) mono-channel audio (wav files) as input.
### Output
This model provides transcribed speech as a string for a given audio sample.
## Model Architecture
Citrinet-CTC is a non-autoregressive variant of the Citrinet model [1] for Automatic Speech Recognition, which uses CTC loss/decoding instead of Transducer loss. You may find more details on this model here: [Citrinet Model](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/models.html).
## Training
The NeMo toolkit [3] was used for training the models for several hundred epochs. These models are trained with this [example script](https://github.com/NVIDIA/NeMo/blob/main/examples/asr/asr_ctc/speech_to_text_ctc_bpe.py) and this [base config](https://github.com/NVIDIA/NeMo/blob/main/examples/asr/conf/citrinet/citrinet_1024.yaml) (note: change `model.model_defaults.filters` to match the model size).
The tokenizers for these models were built using the text transcripts of the train set with this [script](https://github.com/NVIDIA/NeMo/blob/main/scripts/tokenizers/process_asr_text_tokenizer.py).
### Datasets
All the models in this collection are trained on just the LibriSpeech dataset:
- Librispeech 960 hours of English speech
## Performance
The list of the available models in this collection is shown in the following table. Performances of the ASR models are reported in terms of Word Error Rate (WER%) with greedy decoding.
| Version | Tokenizer | Vocabulary Size | LS test-other | LS test-clean |
|---------|---------------------------|-----------------|---------------|---------------|
| 1.0.0 | SentencePiece Unigram [2] | 256 | 7.8 | 3.1 |
## Limitations
Since this model was trained on publicly available speech datasets, the performance of this model might degrade for speech which includes technical terms, or vernacular that the model has not been trained on. The model might also perform worse for accented speech.
## Deployment with NVIDIA Riva
For the best real-time accuracy, latency, and throughput, deploy the model with [NVIDIA Riva](https://developer.nvidia.com/riva), an accelerated speech AI SDK deployable on-prem, in all clouds, multi-cloud, hybrid, at the edge, and embedded.
Additionally, Riva provides:
* World-class out-of-the-box accuracy for the most common languages with model checkpoints trained on proprietary data with hundreds of thousands of GPU-compute hours
* Best in class accuracy with run-time word boosting (e.g., brand and product names) and customization of acoustic model, language model, and inverse text normalization
* Streaming speech recognition, Kubernetes compatible scaling, and Enterprise-grade support
Check out [Riva live demo](https://developer.nvidia.com/riva#demos).
## References
[1] [ Citrinet: Closing the Gap between Non-Autoregressive and Autoregressive End-to-End Models for Automatic Speech Recognition](https://arxiv.org/abs/2104.01721)
[2] [Google Sentencepiece Tokenizer](https://github.com/google/sentencepiece)
[3] [NVIDIA NeMo Toolkit](https://github.com/NVIDIA/NeMo)
## Licence
License to use this model is covered by the [CC-BY-4.0](https://creativecommons.org/licenses/by/4.0/). By downloading the public and release version of the model, you accept the terms and conditions of the [CC-BY-4.0](https://creativecommons.org/licenses/by/4.0/) license. |
BatuhanYilmaz/code-search-net-tokenizer1 | [] | null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
language:
- en
library_name: nemo
datasets:
- librispeech_asr
thumbnail: null
tags:
- automatic-speech-recognition
- speech
- audio
- CTC
- Citrinet
- Transformer
- pytorch
- NeMo
- hf-asr-leaderboard
license: cc-by-4.0
widget:
- example_title: Librispeech sample 1
src: https://cdn-media.huggingface.co/speech_samples/sample1.flac
- example_title: Librispeech sample 2
src: https://cdn-media.huggingface.co/speech_samples/sample2.flac
model-index:
- name: stt_en_citrinet_768_ls
results:
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: LibriSpeech (clean)
type: librispeech_asr
config: clean
split: test
args:
language: en
metrics:
- name: Test WER
type: wer
value: 2.6
- task:
type: Automatic Speech Recognition
name: automatic-speech-recognition
dataset:
name: LibriSpeech (other)
type: librispeech_asr
config: other
split: test
args:
language: en
metrics:
- name: Test WER
type: wer
value: 6.4
---
# NVIDIA Citrinet CTC 768 Librispeech (en-US)
<style>
img {
display: inline;
}
</style>
| [](#model-architecture)
| [](#model-architecture)
| [](#datasets)
| [](#deployment-with-nvidia-riva) |
This model transcribes speech in the lower case English alphabet along with spaces and apostrophes.
It is a "medium-large" version of the Citrinet-CTC model (around 81M parameters).
See the [model architecture](#model-architecture) section and [NeMo documentation](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/models.html#citrinet) for complete architecture details.
It is also compatible with NVIDIA Riva for [production-grade server deployments](#deployment-with-nvidia-riva).
## NVIDIA NeMo: Training
To train, fine-tune or play with the model you will need to install [NVIDIA NeMo](https://github.com/NVIDIA/NeMo). We recommend you install it after you've installed the latest PyTorch version.
```
pip install nemo_toolkit['all']
```
## How to Use this Model
The model is available for use in the NeMo toolkit [3], and can be used as a pre-trained checkpoint for inference or for fine-tuning on another dataset.
### Automatically instantiate the model
```python
import nemo.collections.asr as nemo_asr
asr_model = nemo_asr.models.EncDecCTCModelBPE.from_pretrained("nvidia/stt_en_citrinet_768_ls")
```
### Transcribing using Python
First, let's get a sample
```
wget https://dldata-public.s3.us-east-2.amazonaws.com/2086-149220-0033.wav
```
Then simply do:
```
asr_model.transcribe(['2086-149220-0033.wav'])
```
### Transcribing many audio files
```shell
python [NEMO_GIT_FOLDER]/examples/asr/transcribe_speech.py \
 pretrained_name="nvidia/stt_en_citrinet_768_ls" \
 audio_dir="<DIRECTORY CONTAINING AUDIO FILES>"
```
### Input
This model accepts 16 kHz (16000 Hz) mono-channel audio (wav files) as input.
### Output
This model provides transcribed speech as a string for a given audio sample.
## Model Architecture
Citrinet-CTC is a non-autoregressive variant of the Citrinet model [1] for Automatic Speech Recognition, which uses CTC loss/decoding instead of Transducer loss. You may find more details on this model here: [Citrinet Model](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/models.html).
## Training
The NeMo toolkit [3] was used for training the models for several hundred epochs. These models are trained with this [example script](https://github.com/NVIDIA/NeMo/blob/main/examples/asr/asr_ctc/speech_to_text_ctc_bpe.py) and this [base config](https://github.com/NVIDIA/NeMo/blob/main/examples/asr/conf/citrinet/citrinet_1024.yaml) (note: change `model.model_defaults.filters` to match the model size).
The tokenizers for these models were built using the text transcripts of the train set with this [script](https://github.com/NVIDIA/NeMo/blob/main/scripts/tokenizers/process_asr_text_tokenizer.py).
### Datasets
All the models in this collection are trained on just the LibriSpeech dataset:
- Librispeech 960 hours of English speech
## Performance
The list of the available models in this collection is shown in the following table. Performances of the ASR models are reported in terms of Word Error Rate (WER%) with greedy decoding.
| Version | Tokenizer | Vocabulary Size | LS test-other | LS test-clean |
|---------|---------------------------|-----------------|---------------|---------------|
| 1.0.0 | SentencePiece Unigram [2] | 256 | 6.4 | 2.6 |
## Limitations
Since this model was trained on publicly available speech datasets, the performance of this model might degrade for speech which includes technical terms, or vernacular that the model has not been trained on. The model might also perform worse for accented speech.
## Deployment with NVIDIA Riva
For the best real-time accuracy, latency, and throughput, deploy the model with [NVIDIA Riva](https://developer.nvidia.com/riva), an accelerated speech AI SDK deployable on-prem, in all clouds, multi-cloud, hybrid, at the edge, and embedded.
Additionally, Riva provides:
* World-class out-of-the-box accuracy for the most common languages with model checkpoints trained on proprietary data with hundreds of thousands of GPU-compute hours
* Best in class accuracy with run-time word boosting (e.g., brand and product names) and customization of acoustic model, language model, and inverse text normalization
* Streaming speech recognition, Kubernetes compatible scaling, and Enterprise-grade support
Check out [Riva live demo](https://developer.nvidia.com/riva#demos).
## References
[1] [ Citrinet: Closing the Gap between Non-Autoregressive and Autoregressive End-to-End Models for Automatic Speech Recognition](https://arxiv.org/abs/2104.01721)
[2] [Google Sentencepiece Tokenizer](https://github.com/google/sentencepiece)
[3] [NVIDIA NeMo Toolkit](https://github.com/NVIDIA/NeMo)
## Licence
License to use this model is covered by the [CC-BY-4.0](https://creativecommons.org/licenses/by/4.0/). By downloading the public and release version of the model, you accept the terms and conditions of the [CC-BY-4.0](https://creativecommons.org/licenses/by/4.0/) license. |
BatuhanYilmaz/marian-finetuned-kde4-en-to-fr | [] | null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null |
---
tags:
- unity-ml-agents
- ml-agents
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-Pyramids
library_name: ml-agents
---
# **ppo** Agent playing **Pyramids**
This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).
## Usage (with ML-Agents)
The Documentation: https://github.com/huggingface/ml-agents#get-started
We wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub.
### Resume the training
```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```
### Watch your Agent play
You can watch your agent **playing directly in your browser**:
1. Go to https://huggingface.co/spaces/unity/ML-Agents-Pyramids
2. Step 1: Write your model_id: RaphaelReinauer/testpyramidsrnd
3. Step 2: Select your *.nn /*.onnx file
4. Click on Watch the agent play 👀
|
BatuhanYilmaz/mt5-small-finetuned-amazonbooks-en-es | [] | null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- becasv3
model-index:
- name: distilbert-base-uncased-modelo-becas0
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-uncased-modelo-becas0
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the becasv3 dataset.
It achieves the following results on the evaluation set:
- Loss: 3.1182
## Model description
More information needed
## Intended uses & limitations
More information needed
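Since the card gives no usage details, here is only an illustrative extractive-QA sketch; the checkpoint path, question, and context below are placeholders, not values from this card.
```python
# Illustrative sketch only -- checkpoint path, question and context are placeholders.
import torch
from transformers import AutoTokenizer, AutoModelForQuestionAnswering
model_path = "path/to/distilbert-base-uncased-modelo-becas0"  # placeholder
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForQuestionAnswering.from_pretrained(model_path)
question = "¿Qué cubre la beca?"
context = "La beca cubre la matrícula y los materiales de estudio."
inputs = tokenizer(question, context, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
start = int(outputs.start_logits.argmax())
end = int(outputs.end_logits.argmax()) + 1
print(tokenizer.decode(inputs["input_ids"][0][start:end]))
```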
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 10
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| No log | 1.0 | 5 | 5.5381 |
| No log | 2.0 | 10 | 4.9493 |
| No log | 3.0 | 15 | 4.4985 |
| No log | 4.0 | 20 | 4.1063 |
| No log | 5.0 | 25 | 3.7708 |
| No log | 6.0 | 30 | 3.5205 |
| No log | 7.0 | 35 | 3.3313 |
| No log | 8.0 | 40 | 3.2195 |
| No log | 9.0 | 45 | 3.1453 |
| No log | 10.0 | 50 | 3.1182 |
### Framework versions
- Transformers 4.20.1
- Pytorch 1.12.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
|
Beatriz/model_name | [] | null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | #####
## Bloom2.5B Zen ##
#####
Bloom (2.5 B) Scientific Model fine-tuned on Zen knowledge
#####
## Usage ##
#####
```python
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
tokenizer = AutoTokenizer.from_pretrained("MultiTrickFox/bloom-2b5_Zen")
model = AutoModelForCausalLM.from_pretrained("MultiTrickFox/bloom-2b5_Zen")
generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
inp = [ """Today""", """Yesterday""" ]
out = generator(
inp,
do_sample=True,
temperature=.7,
typical_p=.6,
#top_p=.9,
repetition_penalty=1.2,
max_new_tokens=666,
max_time=60, # seconds
)
for o in out: print(o[0]['generated_text'])
``` |
Bella4322/Sarah | [] | null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
license: mit
tags:
- generated_from_trainer
datasets:
- xtreme
metrics:
- f1
model-index:
- name: xlm-roberta-base-finetuned-panx-de
results:
- task:
name: Token Classification
type: token-classification
dataset:
name: xtreme
type: xtreme
args: PAN-X.de
metrics:
- name: F1
type: f1
value: 0.8640345886904085
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# xlm-roberta-base-finetuned-panx-de
This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1426
- F1: 0.8640
## Model description
More information needed
## Intended uses & limitations
More information needed
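A minimal inference sketch (not part of the original card); replace the placeholder model path with this checkpoint's Hub id or a local directory.
```python
from transformers import pipeline
ner = pipeline(
    "token-classification",
    model="path/to/xlm-roberta-base-finetuned-panx-de",  # placeholder
    aggregation_strategy="simple",
)
print(ner("Jeff Dean arbeitet bei Google in Kalifornien."))
```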
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.2525 | 1.0 | 787 | 0.1795 | 0.8184 |
| 0.1283 | 2.0 | 1574 | 0.1402 | 0.8468 |
| 0.08 | 3.0 | 2361 | 0.1426 | 0.8640 |
### Framework versions
- Transformers 4.20.1
- Pytorch 1.12.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
|
Benicio/t5-small-finetuned-en-to-ro | [] | null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
license: mit
tags:
- generated_from_keras_callback
model-index:
- name: dummy-model
results: []
---
<!-- This model card has been generated automatically according to the information Keras had access to. You should
probably proofread and complete it, then remove this comment. -->
# dummy-model
This model is a fine-tuned version of [camembert-base](https://huggingface.co/camembert-base) on an unknown dataset.
It achieves the following results on the evaluation set:
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- optimizer: None
- training_precision: float32
### Training results
### Framework versions
- Transformers 4.20.1
- TensorFlow 2.8.2
- Datasets 2.3.2
- Tokenizers 0.12.1
|
Betaniaolivo/Foto | [] | null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
license: apache-2.0
---
This repo contains weights for some models from https://github.com/open-mmlab/mmocr |
Bharathdamu/wav2vec2-large-xls-r-300m-hindi | [
"pytorch",
"tensorboard",
"wav2vec2",
"automatic-speech-recognition",
"dataset:common_voice",
"transformers",
"generated_from_trainer",
"license:apache-2.0"
] | automatic-speech-recognition | {
"architectures": [
"Wav2Vec2ForCTC"
],
"model_type": "wav2vec2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 10 | 2022-07-16T07:34:00Z | ---
tags:
- conversational
---
# Dirk Strider DialoGPT Model |
BigSalmon/BlankSlots | [
"pytorch",
"jax",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible"
] | text2text-generation | {
"architectures": [
"T5ForConditionalGeneration"
],
"model_type": "t5",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": true,
"length_penalty": 2,
"max_length": 200,
"min_length": 30,
"no_repeat_ngram_size": 3,
"num_beams": 4,
"prefix": "summarize: "
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": true,
"max_length": 300,
"num_beams": 4,
"prefix": "translate English to German: "
},
"translation_en_to_fr": {
"early_stopping": true,
"max_length": 300,
"num_beams": 4,
"prefix": "translate English to French: "
},
"translation_en_to_ro": {
"early_stopping": true,
"max_length": 300,
"num_beams": 4,
"prefix": "translate English to Romanian: "
}
}
} | 4 | 2022-07-16T10:59:01Z | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- imdb
model-index:
- name: distilbert-base-uncased-finetuned-imdb
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-uncased-finetuned-imdb
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset.
It achieves the following results on the evaluation set:
- Loss: 2.4721
## Model description
More information needed
## Intended uses & limitations
More information needed
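As an illustrative sketch (not part of the original card), the domain-adapted checkpoint can be used for masked-token prediction; the model path is a placeholder.
```python
from transformers import pipeline
fill_mask = pipeline("fill-mask", model="path/to/distilbert-base-uncased-finetuned-imdb")  # placeholder path
for pred in fill_mask("This movie was an absolute [MASK]."):
    print(pred["token_str"], round(pred["score"], 3))
```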
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 64
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 2.7086 | 1.0 | 157 | 2.4898 |
| 2.5796 | 2.0 | 314 | 2.4230 |
| 2.5269 | 3.0 | 471 | 2.4354 |
### Framework versions
- Transformers 4.25.1
- Pytorch 1.13.0+cu116
- Datasets 2.8.0
- Tokenizers 0.13.2
|
BigSalmon/FormalBerta3 | [
"pytorch",
"roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | {
"architectures": [
"RobertaForMaskedLM"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 4 | null | ---
language:
- en
- hi
- multilingual
license: apache-2.0
tags:
- translation
- Hindi
- generated_from_keras_callback
datasets:
- HindiEnglishCorpora
---
# opus-mt-finetuned-hi-en
This model is a fine-tuned version of [Helsinki-NLP/opus-mt-hi-en](https://huggingface.co/Helsinki-NLP/opus-mt-hi-en) on [HindiEnglish Corpora](https://www.clarin.eu/resource-families/parallel-corpora)
## Model description
The model is a Transformer model, similar to the architecture defined in [Attention Is All You Need](https://arxiv.org/abs/1706.03762?context=cs) by Vaswani et al.
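For illustration (not part of the original card), the fine-tuned checkpoint can be used through the `translation` pipeline; the model path and example sentence are placeholders.
```python
from transformers import pipeline
translator = pipeline("translation", model="path/to/opus-mt-finetuned-hi-en")  # placeholder path
print(translator("मुझे यह फ़िल्म बहुत पसंद आई।", max_length=64)[0]["translation_text"])
```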
## Training and evaluation data
More information needed
## Training procedure
The model was trained on 2 NVIDIA_TESLA_A100 GPUs on Google's Vertex AI platform.
### Training hyperparameters
The following hyperparameters were used during training:
- optimizer: AdamWeightDecay
- training_precision: float32
### Training results
### Framework versions
- Transformers 4.20.1
- TensorFlow 2.8.2
- Datasets 2.3.2
- Tokenizers 0.12.1
|
BigSalmon/InformalToFormalLincoln17 | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 12 | null | ---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: bart-pizza
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# bart-pizza
This model is a fine-tuned version of [facebook/bart-base](https://huggingface.co/facebook/bart-base) on the None dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 1
- eval_batch_size: 1
- seed: 42
- gradient_accumulation_steps: 16
- total_train_batch_size: 16
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- num_epochs: 30
### Training results
### Framework versions
- Transformers 4.20.1
- Pytorch 1.11.0+cu102
- Datasets 2.3.2
- Tokenizers 0.12.1
|
BigSalmon/InformalToFormalLincolnDistilledGPT2 | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 7 | 2022-07-16T20:55:57Z | ---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: bart-pizza-5K
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# bart-pizza-5K
This model is a fine-tuned version of [facebook/bart-base](https://huggingface.co/facebook/bart-base) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1688
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 1
- eval_batch_size: 1
- seed: 42
- gradient_accumulation_steps: 16
- total_train_batch_size: 16
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- num_epochs: 2
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 0.0171 | 1.6 | 500 | 0.1688 |
### Framework versions
- Transformers 4.20.1
- Pytorch 1.11.0+cu102
- Datasets 2.3.2
- Tokenizers 0.12.1
|
BigSalmon/MrLincoln | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 7 | 2022-07-16T21:07:00Z | ---
library_name: stable-baselines3
tags:
- SpaceInvadersNoFrameskip-v4
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: DQN
results:
- metrics:
- type: mean_reward
value: 655.50 +/- 310.07
name: mean_reward
task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: SpaceInvadersNoFrameskip-v4
type: SpaceInvadersNoFrameskip-v4
---
# **DQN** Agent playing **SpaceInvadersNoFrameskip-v4**
This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4**
using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3)
and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo).
The RL Zoo is a training framework for Stable Baselines3
reinforcement learning agents,
with hyperparameter optimization and pre-trained agents included.
## Usage (with SB3 RL Zoo)
RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/>
SB3: https://github.com/DLR-RM/stable-baselines3<br/>
SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib
```
# Download model and save it into the logs/ folder
python -m utils.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga Tstarshak -f logs/
python enjoy.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/
```
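If you would rather load the checkpoint directly with stable-baselines3 instead of the RL Zoo scripts, a sketch along these lines should work; the repo id and filename follow the usual RL Zoo naming convention and are assumptions, and the environment setup mirrors the hyperparameters listed further down:
```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import DQN
from stable_baselines3.common.env_util import make_atari_env
from stable_baselines3.common.vec_env import VecFrameStack

# Assumed repo id / filename; adjust to the actual upload.
checkpoint = load_from_hub(
    repo_id="Tstarshak/dqn-SpaceInvadersNoFrameskip-v4",
    filename="dqn-SpaceInvadersNoFrameskip-v4.zip",
)
model = DQN.load(checkpoint)

# Env setup matching the hyperparameters: AtariWrapper + 4-frame stacking.
env = make_atari_env("SpaceInvadersNoFrameskip-v4", n_envs=1)
env = VecFrameStack(env, n_stack=4)

obs = env.reset()
for _ in range(1000):
    action, _ = model.predict(obs, deterministic=True)
    obs, rewards, dones, infos = env.step(action)
```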
## Training (with the RL Zoo)
```
python train.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/
# Upload the model and generate video (when possible)
python -m utils.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga Tstarshak
```
## Hyperparameters
```python
OrderedDict([('batch_size', 32),
('buffer_size', 100000),
('env_wrapper',
['stable_baselines3.common.atari_wrappers.AtariWrapper']),
('exploration_final_eps', 0.01),
('exploration_fraction', 0.1),
('frame_stack', 4),
('gradient_steps', 1),
('learning_rate', 0.0001),
('learning_starts', 100000),
('n_timesteps', 1000000.0),
('optimize_memory_usage', False),
('policy', 'CnnPolicy'),
('target_update_interval', 1000),
('train_freq', 4),
('normalize', False)])
```
|
BigSalmon/MrLincoln11 | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 9 | 2022-07-16T22:04:12Z | ---
tags:
- conversational
---
# Uriel Dot DialoGPT Model |
BigSalmon/MrLincoln2 | [
"pytorch",
"tensorboard",
"gpt2",
"text-generation",
"transformers"
] | text-generation | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 9 | null | ---
language: "pt"
widget:
- text: "Tinha uma pedra no meio do caminho."
- text: "Vamos tomar um cafรฉ quentinho?"
- text: "Como vocรช se chama?"
datasets:
- MacMorpho
---
# POS-Tagger Portuguese
We fine-tuned the [BERTimbau](https://github.com/neuralmind-ai/portuguese-bert/) model on the [MacMorpho](http://nilc.icmc.usp.br/macmorpho/) corpus for the POS-tagging task, training for 10 epochs and achieving an overall F1-score of 0.9826.
Metrics:
```
              Precision  Recall   F1      Support
accuracy                          0.98    33729
macro avg       0.96      0.95    0.95    33729
weighted avg    0.98      0.98    0.98    33729

F1: 0.9826    Accuracy: 0.9826
```
Parameters:
```
nclasses = 27
nepochs = 30
batch_size = 32
batch_status = 32
learning_rate = 1e-5
early_stop = 3
max_length = 200
```
Tags:
| Tag | Meaning |
| ------------------- | ------------------- |
| ADJ | Adjective |
| ADV | Adverb |
| ADV-KS | Subordinating connective adverb |
| ADV-KS-REL | Relative subordinating adverb |
| ART | Article |
| CUR | Currency |
| IN | Interjection |
| KC | Coordinating conjunction |
| KS | Subordinating conjunction |
| N | Noun |
| NPROP | Proper noun |
| NUM | Number |
| PCP | Participle |
| PDEN | Denotative word |
| PREP | Preposition |
| PROADJ | Adjective pronoun |
| PRO-KS | Subordinating connective pronoun |
| PRO-KS-REL | Relative subordinating connective pronoun |
| PROPESS | Personal pronoun |
| PROSUB | Nominal pronoun |
| V | Verb |
| VAUX | Auxiliary verb |
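A minimal inference sketch with the Transformers pipeline API; the checkpoint id below is a placeholder, so point it at wherever the fine-tuned model is published:
```python
from transformers import pipeline

# Placeholder checkpoint id; replace with the published fine-tuned model.
tagger = pipeline("token-classification", model="<user>/postagger-portuguese")

for token in tagger("Tinha uma pedra no meio do caminho."):
    print(token["word"], token["entity"])  # e.g. "pedra" -> N
```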
## Questions?
Please, post a Github issue on the [NLP Portuguese POS-Tagger](https://github.com/lisaterumi/nlp-portuguese-postagger). |
BigSalmon/MrLincoln3 | [
"pytorch",
"tensorboard",
"gpt2",
"text-generation",
"transformers"
] | text-generation | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 17 | 2022-07-17T01:52:27Z | ```
from transformers import AutoTokenizer, AutoModelForCausalLM
tokenizer = AutoTokenizer.from_pretrained("BigSalmon/GPTNeo1.3BInformalToFormal")
model = AutoModelForCausalLM.from_pretrained("BigSalmon/GPTNeo1.3BInformalToFormal")
```
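The snippet above only loads the model; a generation call could look like the sketch below, where the sampling settings are assumptions rather than the author's recommended values:
```python
prompt = "informal english: i am very ready to do that just that.\nTranslated into the Style of Abraham Lincoln:"
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(
    **inputs,
    max_new_tokens=50,
    do_sample=True,
    top_p=0.9,
    pad_token_id=tokenizer.eos_token_id,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```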
```
How To Make Prompt:
informal english: i am very ready to do that just that.
Translated into the Style of Abraham Lincoln: you can assure yourself of my readiness to work toward this end.
Translated into the Style of Abraham Lincoln: please be assured that i am most ready to undertake this laborious task.
***
informal english: space is huge and needs to be explored.
Translated into the Style of Abraham Lincoln: space awaits traversal, a new world whose boundaries are endless.
Translated into the Style of Abraham Lincoln: space is a ( limitless / boundless ) expanse, a vast virgin domain awaiting exploration.
***
informal english: corn fields are all across illinois, visible once you leave chicago.
Translated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago.
informal english:
```
```
infill: chrome extensions [MASK] accomplish everyday tasks.
Translated into the Style of Abraham Lincoln: chrome extensions ( expedite the ability to / unlock the means to more readily ) accomplish everyday tasks.
infill: at a time when nintendo has become inflexible, [MASK] consoles that are tethered to a fixed iteration, sega diligently curates its legacy of classic video games on handheld devices.
Translated into the Style of Abraham Lincoln: at a time when nintendo has become inflexible, ( stubbornly [MASK] on / firmly set on / unyielding in its insistence on ) consoles that are tethered to a fixed iteration, sega diligently curates its legacy of classic video games on handheld devices.
infill:
```
```
Essay Intro (Warriors vs. Rockets in Game 7):
text: eagerly anticipated by fans, game 7's are the highlight of the post-season.
text: ever-building in suspense, game 7's have the crowd captivated.
***
Essay Intro (South Korean TV Is Becoming Popular):
text: maturing into a bona fide paragon of programming, south korean television ( has much to offer / entertains without fail / never disappoints ).
text: increasingly held in critical esteem, south korean television continues to impress.
text: at the forefront of quality content, south korea is quickly achieving celebrity status.
***
Essay Intro (
```
```
Search: What is the definition of Checks and Balances?
https://en.wikipedia.org/wiki/Checks_and_balances
Checks and Balances is the idea of having a system where each and every action in government should be subject to one or more checks that would not allow one branch or the other to overly dominate.
https://www.harvard.edu/glossary/Checks_and_Balances
Checks and Balances is a system that allows each branch of government to limit the powers of the other branches in order to prevent abuse of power
https://www.law.cornell.edu/library/constitution/Checks_and_Balances
Checks and Balances is a system of separation through which branches of government can control the other, thus preventing excess power.
***
Search: What is the definition of Separation of Powers?
https://en.wikipedia.org/wiki/Separation_of_powers
The separation of powers is a principle in government, whereby governmental powers are separated into different branches, each with their own set of powers, that are prevent one branch from aggregating too much power.
https://www.yale.edu/tcf/Separation_of_Powers.html
Separation of Powers is the division of governmental functions between the executive, legislative and judicial branches, clearly demarcating each branch's authority, in the interest of ensuring that individual liberty or security is not undermined.
***
Search: What is the definition of Connection of Powers?
https://en.wikipedia.org/wiki/Connection_of_powers
Connection of Powers is a feature of some parliamentary forms of government where different branches of government are intermingled, typically the executive and legislative branches.
https://simple.wikipedia.org/wiki/Connection_of_powers
The term Connection of Powers describes a system of government in which there is overlap between different parts of the government.
***
Search: What is the definition of
```
```
Search: What are phrase synonyms for "second-guess"?
https://www.powerthesaurus.org/second-guess/synonyms
Shortest to Longest:
- feel dubious about
- raise an eyebrow at
- wrinkle their noses at
- cast a jaundiced eye at
- teeter on the fence about
***
Search: What are phrase synonyms for "mean to newbies"?
https://www.powerthesaurus.org/mean_to_newbies/synonyms
Shortest to Longest:
- readiness to balk at rookies
- absence of tolerance for novices
- hostile attitude toward newcomers
***
Search: What are phrase synonyms for "make use of"?
https://www.powerthesaurus.org/make_use_of/synonyms
Shortest to Longest:
- call upon
- glean value from
- reap benefits from
- derive utility from
- seize on the merits of
- draw on the strength of
- tap into the potential of
***
Search: What are phrase synonyms for "hurting itself"?
https://www.powerthesaurus.org/hurting_itself/synonyms
Shortest to Longest:
- erring
- slighting itself
- forfeiting its integrity
- doing itself a disservice
- evincing a lack of backbone
***
Search: What are phrase synonyms for "
```
```
original: sports teams are profitable for owners. [MASK], their valuations experience a dramatic uptick.
infill: sports teams are profitable for owners. ( accumulating vast sums / stockpiling treasure / realizing benefits / cashing in / registering robust financials / scoring on balance sheets ), their valuations experience a dramatic uptick.
***
original:
```
```
wordy: classical music is becoming less popular more and more.
Translate into Concise Text: interest in classic music is fading.
***
wordy:
```
make longer
```
sweet: savvy voters ousted him.
longer: voters who were informed delivered his defeat.
***
sweet: embodies compassion.
longer: is the personification of compassion.
***
sweet:
```
```
1: commercial space company spacex plans to launch a whopping 52 flights in 2022.
2: spacex, a commercial space company, intends to undertake a total of 52 flights in 2022.
3: in 2022, commercial space company spacex has its sights set on undertaking 52 flights.
4: 52 flights are in the pipeline for 2022, according to spacex, a commercial space company.
5: a commercial space company, spacex aims to conduct 52 flights in 2022.
***
1:
```
```
ngos are characterized by:
โก voluntary citizens' group that is organized on a local, national or international level
โก encourage political participation
โก often serve humanitarian functions
โก work for social, economic, or environmental change
***
what are the drawbacks of living near an airbnb?
โก noise
โก parking
โก traffic
โก security
โก strangers
***
```
```
original: musicals generally use spoken dialogue as well as songs to convey the story. operas are usually fully sung.
adapted: musicals generally use spoken dialogue as well as songs to convey the story. ( in a stark departure / on the other hand / in contrast / by comparison / at odds with this practice / far from being alike / in defiance of this standard / running counter to this convention ), operas are usually fully sung.
***
original: akoya and tahitian are types of pearls. akoya pearls are mostly white, and tahitian pearls are naturally dark.
adapted: akoya and tahitian are types of pearls. ( a far cry from being indistinguishable / easily distinguished / on closer inspection / setting them apart / not to be mistaken for one another / hardly an instance of mere synonymy / differentiating the two ), akoya pearls are mostly white, and tahitian pearls are naturally dark.
***
original:
```
```
original: work in an office ).
translated into journalism speak: ( beaver away in windowless offices / toil in drab cubicles / clock in at faceless workstations / report for duty in cheerless quarters / log hours in colorless confines / clack away on keyboards in offices with cinderblock walls / stare at computer screens in bland partitions / shuffle through mounds of paperwork in humdrum offices ).
***
original: easy job ).
translated into journalism speak: ( cushy / hassle-free / uninvolved / vanilla / sedentary / straightforward / effortless / lax / plush / frictionless / painless ) ( gig / perch / post / trade / calling / paycheck ).
***
original:
```
```
input: not loyal
1800s english: ( two-faced / inimical / perfidious / duplicitous / mendacious / double-dealing / shifty ).
***
input:
```
```
original: big businesses ).
translated into journalism speak: corporate ( behemoths / heavyweights / titans / steamrollers / powerhouses / bigwigs / kahunas / brutes / honchos / barons / kingpins / rainmakers / headliners ).
***
original: environmental movement ).
translated into journalism speak: ( green lobby / conservationist camp / tree-huggers / ecology-obsessed / sustainability crusaders / preservation-crazed / ecological campaigners ).
***
original:
```
```
first: ( was complicit in / was involved in ).
antonym: ( was blameless / was not an accomplice to / had no hand in / was uninvolved in ).
***
first: ( have no qualms about / see no issue with ).
antonym: ( are deeply troubled by / harbor grave reservations about / have a visceral aversion to / take ( umbrage at / exception to ) / are wary of ).
***
first: ( do not see eye to eye / disagree often ).
antonym: ( are in sync / are united / have excellent rapport / are like-minded / are in step / are of one mind / are in lockstep / operate in perfect harmony / march in lockstep ).
***
first:
``` |
BigSalmon/MrLincoln5 | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 9 | 2022-07-17T02:32:50Z | ---
language: eu
license: cc-by-sa-4.0
datasets:
- cc100
- oscar
widget:
- text: "Euria egingo <mask> gaur ?"
- text: "<mask> umeari liburua eman dio."
- text: "Zein da zure <mask> ?"
---
## RoBERTa Basque small model (Uncased)
### Prerequisites
transformers==4.19.2
### Model architecture
This model has approximately half the number of parameters of the RoBERTa base model.
### Tokenizer
Using BPE tokenizer with vocabulary size 50,000.
### Training Data
* Subset of [CC-100/eu](https://data.statmt.org/cc-100/) : Monolingual Datasets from Web Crawl Data
* Subset of [oscar](https://huggingface.co/datasets/oscar)
### Usage
```python
from transformers import pipeline
unmasker = pipeline('fill-mask', model='ClassCat/roberta-small-basque')
unmasker("Zein da zure <mask> ?")
``` |
BigSalmon/MrLincoln7 | [] | null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | 2022-07-17T02:47:25Z | ---
tags:
- conversational
---
# 707 DialoGPT Model
Chatbot for the character 707 from Mystic Messenger. |
BigSalmon/MrLincoln8 | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
] | text-generation | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 12 | null |
---
tags:
- unity-ml-agents
- ml-agents
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-Pyramids
library_name: ml-agents
---
# **ppo** Agent playing **Pyramids**
This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).
## Usage (with ML-Agents)
The Documentation: https://github.com/huggingface/ml-agents#get-started
We wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub:
### Resume the training
```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```
### Watch your Agent play
You can watch your agent **playing directly in your browser**:
1. Go to https://huggingface.co/spaces/unity/ML-Agents-Pyramids
2. Write your model id: micheljperez/testpyramidsrnd
3. Select your *.nn / *.onnx file
4. Click on Watch the agent play ๐
|
BigSalmon/MrLincolnBerta | [
"pytorch",
"roberta",
"fill-mask",
"transformers",
"autotrain_compatible",
"has_space"
] | fill-mask | {
"architectures": [
"RobertaForMaskedLM"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 8 | 2022-07-17T03:38:42Z | ---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
results:
- metrics:
- type: mean_reward
value: 218.99 +/- 76.60
name: mean_reward
task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: LunarLander-v2
type: LunarLander-v2
---
# **PPO** Agent playing **LunarLander-v2**
This is a trained model of a **PPO** agent playing **LunarLander-v2**
using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).
## Usage (with Stable-baselines3)
TODO: Add your code
```python
from stable_baselines3 import ...
from huggingface_sb3 import load_from_hub
...
```
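Until the author fills in the section above, a minimal loading-and-evaluation sketch might look like this; the repo id and filename are placeholders based on common naming:
```python
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

# Placeholder repo id / filename; adjust to the actual upload.
checkpoint = load_from_hub(
    repo_id="<user>/ppo-LunarLander-v2",
    filename="ppo-LunarLander-v2.zip",
)
model = PPO.load(checkpoint)

env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```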
|
BigSalmon/PhraseBerta | [
"pytorch",
"roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
] | fill-mask | {
"architectures": [
"RobertaForMaskedLM"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 10 | null | ---
language: ko
---
# koenbert-base
์ต๊ทผ ๋ค์ํ ํ๊ตญ์ด ์ธ์ด ๋ชจ๋ธ๋ค์ด ๊ฐ๋ฐ ๋ฐ ๊ณต์ ๋๊ณ ์์ต๋๋ค. ํ์ง๋ง ์ด๋ฌํ ๋ชจ๋ธ๋ค์ ํ๊ตญ์ด๋ง ์ง์ํ๊ธฐ ๋๋ฌธ์ Dialog system, Information retrieval ๋ฑ ๋ค์ํ ๋๋ฉ์ธ์์ ์ ์๋๋ ์์ด ๋ฐ์ดํฐ๋ฅผ ํ์ฉํ๊ธฐ ์ด๋ ต๋ค๋ ํ๊ณ์ ์ด ์์ต๋๋ค. Multilingual ๋ชจ๋ธ์ ๊ฒฝ์ฐ ์ง์ํ๋ ์ธ์ด์ ์๊ฐ ๋ง์ ๋ชจ๋ธ ํฌ๊ธฐ๊ฐ ํฌ๊ณ ํ๊ตญ์ด ์ฑ๋ฅ์ด ๋จ์ด์ง๋ค๋ ๋จ์ ์ด ์์ต๋๋ค. ์ด๋ฌํ ํ๊ณ์ ์ ํด์ํ๊ณ ํ๊ตญ์ด ๋ชจ๋ธ์ ํ์ฉ๋๋ฅผ ๋์ด๊ธฐ ์ํด ํ๊ตญ์ด ์ธ์ด ๋ชจ๋ธ์ ์์ด๋ฅผ ํ์ตํ๋ ํ๋ก์ ํธ๋ฅผ ์งํํ๊ณ ์์ต๋๋ค. ๋ชจ๋ธ์ ๋ํ ์์ธํ ์ ๋ณด๋ [Github repo](https://github.com/respect5716/kobert-to-koenbert)์์ ํ์ธํด์ฃผ์ธ์.
## ์คํ
```python
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained('respect5716/koen-bert-base')
model = AutoModel.from_pretrained('respect5716/koen-bert-base')
```
|
BigSalmon/Points2 | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"has_space"
] | text-generation | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 12 | null | ---
language: eu
license: cc-by-sa-4.0
datasets:
- cc100
- oscar
widget:
- text: "Zein da zure"
- text: "Euria egingo"
- text: "Nola dakizu ?"
---
## GPT2 Basque small model Version 2 (Uncased)
### Prerequisites
transformers==4.19.2
### Model architecture
This model has approximately half the number of parameters of the GPT2 base model.
### Tokenizer
Using BPE tokenizer with vocabulary size 50,000.
### Training Data
* Subset of [CC-100/eu](https://data.statmt.org/cc-100/) : Monolingual Datasets from Web Crawl Data
* Subset of [oscar](https://huggingface.co/datasets/oscar)
### Usage
```python
from transformers import pipeline
generator = pipeline('text-generation', model='ClassCat/gpt2-small-basque-v2')
generator("Zein da zure ", max_length=50, num_return_sequences=5)
``` |
BigSalmon/TS3 | [
"pytorch",
"t5",
"text2text-generation",
"transformers",
"autotrain_compatible",
"has_space"
] | text2text-generation | {
"architectures": [
"T5ForConditionalGeneration"
],
"model_type": "t5",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 7 | null | ---
datasets: vincentclaes/emoji-predictor
---
# Emoji Predictor
This model is OpenAI's CLIP, fine-tuned on a small number of samples per emoji.
Try it here: https://huggingface.co/spaces/vincentclaes/emoji-predictor
- pretrained model: https://huggingface.co/openai/clip-vit-base-patch32
- dataset: https://huggingface.co/datasets/vincentclaes/emoji-predictor
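For reference, a zero-shot sketch of the underlying idea with the pretrained CLIP backbone; the emoji image files are assumptions, and the fine-tuned weights from this repo are what the Space actually uses:
```python
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

# Assumed local emoji images; swap in the fine-tuned checkpoint for better results.
emojis = [Image.open(p) for p in ["pizza.png", "rocket.png", "heart.png"]]
text = ["I could really go for a slice right now"]

inputs = processor(text=text, images=emojis, return_tensors="pt", padding=True)
logits = model(**inputs).logits_per_text   # shape: (num_texts, num_images)
print(logits.softmax(dim=-1))              # probability of each emoji for the text
```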
Precision for predictions and suggestions for a range of samples per emoji:
```
| Samples | Prediction | Suggestion |
|--------- |------------ |------------ |
| 0 | 0.13 | 0.33 |
| 1 | 0.11 | 0.30 |
| 5 | 0.14 | 0.38 |
| 10 | 0.20 | 0.45 |
| 15 | 0.22 | 0.51 |
| 20 | 0.19 | 0.49 |
| 25 | 0.24 | 0.54 |
| 50 | 0.23 | 0.53 |
| 100 | 0.25 | 0.57 |
| 250 | 0.29 | 0.62 |
| 500 | 0.29 | 0.63 |
``` |
BigTooth/DialoGPT-Megumin | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": 1000
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 16 | null | ---
library_name: keras
---
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Model Plot
<details>
<summary>View Model Plot</summary>

</details> |
BinksSachary/DialoGPT-small-shaxx | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": 1000
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 12 | 2022-07-17T09:36:03Z | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- conll2003
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: distilbert-base-uncased-finetuned-ner
results:
- task:
name: Token Classification
type: token-classification
dataset:
name: conll2003
type: conll2003
args: conll2003
metrics:
- name: Precision
type: precision
value: 0.9276948590381426
- name: Recall
type: recall
value: 0.9386956035350711
- name: F1
type: f1
value: 0.9331628113879005
- name: Accuracy
type: accuracy
value: 0.9842883695807584
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-uncased-finetuned-ner
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the conll2003 dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0574
- Precision: 0.9277
- Recall: 0.9387
- F1: 0.9332
- Accuracy: 0.9843
## Model description
More information needed
## Intended uses & limitations
More information needed
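Until this card is completed, a minimal inference sketch; the checkpoint path assumes the local trainer output directory, so substitute the Hub id once the model is pushed:
```python
from transformers import pipeline

# Assumed local path to the fine-tuned checkpoint.
ner = pipeline(
    "token-classification",
    model="./distilbert-base-uncased-finetuned-ner",
    aggregation_strategy="simple",
)
print(ner("Hugging Face is based in New York City."))
```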
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| 0.2384 | 1.0 | 878 | 0.0701 | 0.9130 | 0.9220 | 0.9175 | 0.9803 |
| 0.0494 | 2.0 | 1756 | 0.0593 | 0.9222 | 0.9314 | 0.9268 | 0.9829 |
| 0.0301 | 3.0 | 2634 | 0.0574 | 0.9277 | 0.9387 | 0.9332 | 0.9843 |
### Framework versions
- Transformers 4.20.1
- Pytorch 1.12.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
|
BinksSachary/ShaxxBot2 | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
] | conversational | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": 1000
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 12 | 2022-07-17T10:06:33Z | ---
license: mit
---
# Label - Emotion Table
| Emotion | LABEL |
| -------------- |:-------------: |
| Anger | LABEL_0 |
| Boredom | LABEL_1 |
| Empty | LABEL_2 |
| Enthusiasm | LABEL_3 |
| Fear | LABEL_4 |
| Fun | LABEL_5 |
| Happiness | LABEL_6 |
| Hate | LABEL_7 |
| Joy | LABEL_8 |
| Love | LABEL_9 |
| Neutral | LABEL_10 |
| Relief | LABEL_11 |
| Sadness | LABEL_12 |
| Surprise | LABEL_13 |
| Worry | LABEL_14 |
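When a classifier trained with these labels returns the generic `LABEL_n` ids, a small lookup restores the emotion names. A sketch, with a placeholder model id:
```python
from transformers import pipeline

ID2EMOTION = {
    "LABEL_0": "Anger", "LABEL_1": "Boredom", "LABEL_2": "Empty",
    "LABEL_3": "Enthusiasm", "LABEL_4": "Fear", "LABEL_5": "Fun",
    "LABEL_6": "Happiness", "LABEL_7": "Hate", "LABEL_8": "Joy",
    "LABEL_9": "Love", "LABEL_10": "Neutral", "LABEL_11": "Relief",
    "LABEL_12": "Sadness", "LABEL_13": "Surprise", "LABEL_14": "Worry",
}

# Placeholder model id; replace with the emotion classifier this table belongs to.
classifier = pipeline("text-classification", model="<user>/emotion-classifier")
result = classifier("What a wonderful surprise!")[0]
print(ID2EMOTION[result["label"]], result["score"])
```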
|