modelId (string, 4-81 chars) | tags (list) | pipeline_tag (string, 17 classes) | config (dict) | downloads (int64, 0-59.7M) | first_commit (timestamp[ns, tz=UTC]) | card (string, 51-438k chars) |
---|---|---|---|---|---|---|
Bubb-les/DisloGPT-medium-HarryPotter | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
]
| conversational | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": 1000
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 8 | null | ---
license: mit
tags:
- generated_from_trainer
metrics:
- accuracy
- f1
model-index:
- name: roberta-base-fine-Disaster-Tweets-Part3
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# roberta-base-fine-Disaster-Tweets-Part3
This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.3882
- Accuracy: 0.8380
- F1: 0.8377
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 8e-05
- train_batch_size: 32
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 2
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|
| No log | 1.0 | 203 | 0.4632 | 0.8179 | 0.8184 |
| No log | 2.0 | 406 | 0.3882 | 0.8380 | 0.8377 |
### Framework versions
- Transformers 4.24.0
- Pytorch 1.12.1+cu113
- Datasets 2.6.1
- Tokenizers 0.13.2
|
CAMeL-Lab/bert-base-arabic-camelbert-ca-pos-msa | [
"pytorch",
"tf",
"bert",
"token-classification",
"ar",
"arxiv:2103.06678",
"transformers",
"license:apache-2.0",
"autotrain_compatible"
]
| token-classification | {
"architectures": [
"BertForTokenClassification"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 71 | null | ---
tags:
- generated_from_trainer
datasets:
- yelp_review_full
metrics:
- accuracy
model-index:
- name: yelp_review_rating_reberta_base
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: yelp_review_full
type: yelp_review_full
config: yelp_review_full
split: train
args: yelp_review_full
metrics:
- name: Accuracy
type: accuracy
value: 0.67086
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# yelp_review_rating_reberta_base
This model was trained from scratch on the yelp_review_full dataset.
It achieves the following results on the evaluation set:
- Loss: 0.8071
- Accuracy: 0.6709
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- num_epochs: 6
### Training results
| Training Loss | Epoch | Step | Accuracy | Validation Loss |
|:-------------:|:-----:|:------:|:--------:|:---------------:|
| 0.8355 | 1.0 | 40625 | 0.6449 | 0.8211 |
| 0.7709 | 2.0 | 81250 | 0.6615 | 0.7877 |
| 0.7141 | 3.0 | 121875 | 0.6712 | 0.7689 |
| 0.6511 | 4.0 | 162500 | 0.6724 | 0.7845 |
| 0.6229 | 5.0 | 203125 | 0.6719 | 0.8009 |
| 0.6036 | 6.0 | 243750 | 0.6709 | 0.8071 |
### Framework versions
- Transformers 4.22.2
- Pytorch 1.12.1+cu102
- Datasets 2.6.1
- Tokenizers 0.12.1
|
CAMeL-Lab/bert-base-arabic-camelbert-ca | [
"pytorch",
"tf",
"jax",
"bert",
"fill-mask",
"ar",
"arxiv:2103.06678",
"transformers",
"license:apache-2.0",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"BertForMaskedLM"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 580 | null | ---
tags:
- bert
license: cc-by-4.0
---
## bert-mlm-medium
A medium-size BERT Language Model with an **MLM** pre-training objective. For more details about the pre-training objective and the pre-training hyperparameters, please refer to [How does the pre-training objective affect what large language models learn about linguistic properties?](https://aclanthology.org/2022.acl-short.16/)
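As a quick illustration (not part of the original card), the checkpoint could be loaded with the 🤗 Transformers fill-mask pipeline; the repository id below is a placeholder, since the card does not state where the model is hosted:
```python
from transformers import pipeline

# "<org>/bert-mlm-medium" is a placeholder repository id; substitute the actual Hub id.
fill_mask = pipeline("fill-mask", model="<org>/bert-mlm-medium")
print(fill_mask("The capital of France is [MASK]."))
```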
## License
CC BY 4.0
## Citation
If you use this model, please cite the following paper:
```
@inproceedings{alajrami2022does,
title={How does the pre-training objective affect what large language models learn about linguistic properties?},
author={Alajrami, Ahmed and Aletras, Nikolaos},
booktitle={Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)},
pages={131--147},
year={2022}
}
``` |
CAMeL-Lab/bert-base-arabic-camelbert-da-poetry | [
"pytorch",
"tf",
"bert",
"text-classification",
"ar",
"arxiv:1905.05700",
"arxiv:2103.06678",
"transformers",
"license:apache-2.0"
]
| text-classification | {
"architectures": [
"BertForSequenceClassification"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 37 | null | ---
tags:
- bert
license: cc-by-4.0
---
## bert-sr-medium
A medium-size BERT Language Model with a **shuffle + random** pre-training objective. For more details about the pre-training objective and the pre-training hyperparameters, please refer to [How does the pre-training objective affect what large language models learn about linguistic properties?](https://aclanthology.org/2022.acl-short.16/)
## License
CC BY 4.0
## Citation
If you use this model, please cite the following paper:
```
@inproceedings{alajrami2022does,
title={How does the pre-training objective affect what large language models learn about linguistic properties?},
author={Alajrami, Ahmed and Aletras, Nikolaos},
booktitle={Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)},
pages={131--147},
year={2022}
}
``` |
CAMeL-Lab/bert-base-arabic-camelbert-da-pos-glf | [
"pytorch",
"tf",
"bert",
"token-classification",
"ar",
"arxiv:2103.06678",
"transformers",
"license:apache-2.0",
"autotrain_compatible"
]
| token-classification | {
"architectures": [
"BertForTokenClassification"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 54 | null | ---
tags:
- Pong-PLE-v0
- reinforce
- reinforcement-learning
- custom-implementation
- deep-rl-class
model-index:
- name: Reinforce-Pong-PLE-v0
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: Pong-PLE-v0
type: Pong-PLE-v0
metrics:
- type: mean_reward
value: -15.96 +/- 0.72
name: mean_reward
verified: false
---
# **Reinforce** Agent playing **Pong-PLE-v0**
This is a trained model of a **Reinforce** agent playing **Pong-PLE-v0**.
To learn how to use this model and train your own, check Unit 5 of the Deep Reinforcement Learning Class: https://github.com/huggingface/deep-rl-class/tree/main/unit5
|
CAMeL-Lab/bert-base-arabic-camelbert-mix-ner | [
"pytorch",
"tf",
"bert",
"token-classification",
"ar",
"arxiv:2103.06678",
"transformers",
"license:apache-2.0",
"autotrain_compatible",
"has_space"
]
| token-classification | {
"architectures": [
"BertForTokenClassification"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 1,860 | null | ---
language: en
license: apache-2.0
library_name: diffusers
tags: []
datasets: huggan/smithsonian_butterflies_subset
metrics: []
---
<!-- This model card has been generated automatically according to the information the training script had access to. You
should probably proofread and complete it, then remove this comment. -->
# ddpm-butterflies-128
## Model description
This diffusion model is trained with the [🤗 Diffusers](https://github.com/huggingface/diffusers) library
on the `huggan/smithsonian_butterflies_subset` dataset.
## Intended uses & limitations
#### How to use
```python
# TODO: add an example code snippet for running this diffusion pipeline
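# A possible sketch (not from the original card): sampling one image with the
# generic DDPMPipeline from 🤗 Diffusers. The repository id is assumed from the
# TensorBoard link in the Training results section below.
from diffusers import DDPMPipeline

pipeline = DDPMPipeline.from_pretrained("imraan/ddpm-butterflies-128")
image = pipeline().images[0]          # runs the full DDPM denoising loop, returns a PIL image
image.save("butterfly_sample.png")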
```
#### Limitations and bias
[TODO: provide examples of latent issues and potential remediations]
## Training data
[TODO: describe the data used to train the model]
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 16
- eval_batch_size: 16
- gradient_accumulation_steps: 1
- optimizer: AdamW with betas=(None, None), weight_decay=None and epsilon=None
- lr_scheduler: None
- lr_warmup_steps: 500
- ema_inv_gamma: None
- mixed_precision: fp16
### Training results
📈 [TensorBoard logs](https://huggingface.co/imraan/ddpm-butterflies-128/tensorboard?#scalars)
|
CAMeL-Lab/bert-base-arabic-camelbert-mix-pos-glf | [
"pytorch",
"tf",
"bert",
"token-classification",
"ar",
"arxiv:2103.06678",
"transformers",
"license:apache-2.0",
"autotrain_compatible"
]
| token-classification | {
"architectures": [
"BertForTokenClassification"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 132 | null | ---
license: mit
tags:
- generated_from_trainer
metrics:
- f1
model-index:
- name: xlm-roberta-base-finetuned-panx-de-fr
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# xlm-roberta-base-finetuned-panx-de-fr
This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1661
- F1: 0.8557
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.2935 | 1.0 | 715 | 0.1887 | 0.8216 |
| 0.1476 | 2.0 | 1430 | 0.1625 | 0.8473 |
| 0.0955 | 3.0 | 2145 | 0.1661 | 0.8557 |
### Framework versions
- Transformers 4.11.3
- Pytorch 1.12.1+cu113
- Datasets 1.16.1
- Tokenizers 0.10.3
|
CAMeL-Lab/bert-base-arabic-camelbert-msa-did-nadi | [
"pytorch",
"tf",
"bert",
"text-classification",
"ar",
"arxiv:2103.06678",
"transformers",
"license:apache-2.0"
]
| text-classification | {
"architectures": [
"BertForSequenceClassification"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 71 | null | ---
license: mit
tags:
- generated_from_trainer
datasets:
- xtreme
metrics:
- f1
model-index:
- name: xlm-roberta-base-finetuned-panx-de
results:
- task:
name: Token Classification
type: token-classification
dataset:
name: xtreme
type: xtreme
args: PAN-X.de
metrics:
- name: F1
type: f1
value: 0.8648740833380706
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# xlm-roberta-base-finetuned-panx-de
This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1365
- F1: 0.8649
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.2553 | 1.0 | 525 | 0.1575 | 0.8279 |
| 0.1284 | 2.0 | 1050 | 0.1386 | 0.8463 |
| 0.0813 | 3.0 | 1575 | 0.1365 | 0.8649 |
### Framework versions
- Transformers 4.11.3
- Pytorch 1.12.1+cu113
- Datasets 1.16.1
- Tokenizers 0.10.3
|
CAMeL-Lab/bert-base-arabic-camelbert-msa-poetry | [
"pytorch",
"tf",
"bert",
"text-classification",
"ar",
"arxiv:1905.05700",
"arxiv:2103.06678",
"transformers",
"license:apache-2.0"
]
| text-classification | {
"architectures": [
"BertForSequenceClassification"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 25 | null | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- matthews_correlation
model-index:
- name: distilbert-base-uncased-finetuned-cola
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: glue
type: glue
config: cola
split: train
args: cola
metrics:
- name: Matthews Correlation
type: matthews_correlation
value: 0.5528474752734607
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-uncased-finetuned-cola
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the glue dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6169
- Matthews Correlation: 0.5528
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5
### Training results
| Training Loss | Epoch | Step | Validation Loss | Matthews Correlation |
|:-------------:|:-----:|:----:|:---------------:|:--------------------:|
| 0.5233 | 1.0 | 535 | 0.5188 | 0.4126 |
| 0.3459 | 2.0 | 1070 | 0.5068 | 0.4955 |
| 0.2316 | 3.0 | 1605 | 0.6169 | 0.5528 |
| 0.1748 | 4.0 | 2140 | 0.8007 | 0.5306 |
| 0.1274 | 5.0 | 2675 | 0.8444 | 0.5440 |
### Framework versions
- Transformers 4.24.0
- Pytorch 1.12.1+cu113
- Datasets 2.6.1
- Tokenizers 0.13.2
|
CAMeL-Lab/bert-base-arabic-camelbert-msa-pos-msa | [
"pytorch",
"tf",
"bert",
"token-classification",
"ar",
"arxiv:2103.06678",
"transformers",
"license:apache-2.0",
"autotrain_compatible"
]
| token-classification | {
"architectures": [
"BertForTokenClassification"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 133 | null | ---
license: creativeml-openrail-m
thumbnail: "https://huggingface.co/dallinmackay/JWST-Deep-Space-diffusion/resolve/main/previewJWST.jpg"
tags:
- stable-diffusion
- text-to-image
---
### JWST Deep Space Diffusion
This is a fine-tuned Stable Diffusion model (based on v1.5) trained on images taken by the **_James Webb Space Telescope_**, as well as images processed by Judy Schmidt. Use the token **_JWST_** in your prompts to apply the style (e.g., "jwst, green spiral galaxy").
[CKPT download link](https://huggingface.co/dallinmackay/JWST-Deep-Space-diffusion/resolve/main/JWST-Deep-Space.ckpt)
**Images rendered with this model:**
_prompt and settings used: **"JWST"** | **Steps: 25, Sampler: Euler_a, CFG scale: 7**_

--
[](https://www.patreon.com/dallinmackay)
--
This model was trained with Dreambooth, using TheLastBen's Colab notebook
--
### 🧨 Diffusers
This model can be used just like any other Stable Diffusion model. For more information,
please have a look at the [Stable Diffusion documentation](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion).
You can also export the model to [ONNX](https://huggingface.co/docs/diffusers/optimization/onnx), [MPS](https://huggingface.co/docs/diffusers/optimization/mps) and/or [FLAX/JAX]().
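A minimal usage sketch (not part of the original card), assuming the repository id from the download links above and following the same pattern as other Diffusers text-to-image checkpoints:
```python
from diffusers import StableDiffusionPipeline
import torch

model_id = "dallinmackay/JWST-Deep-Space-diffusion"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipe = pipe.to("cuda")

# Include the "jwst" token in the prompt to trigger the style.
prompt = "jwst, green spiral galaxy"
image = pipe(prompt).images[0]
image.save("./jwst_green_spiral_galaxy.png")
```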
## License
This model is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage.
The CreativeML OpenRAIL License specifies:
1. You can't use the model to deliberately produce or share illegal or harmful outputs or content
2. The authors claim no rights over the outputs you generate; you are free to use them and are accountable for their use, which must not go against the provisions set in the license
3. You may re-distribute the weights and use the model commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M license with all your users (please read the license entirely and carefully)
[Please read the full license here](https://huggingface.co/spaces/CompVis/stable-diffusion-license) |
CAMeL-Lab/bert-base-arabic-camelbert-msa-quarter | [
"pytorch",
"tf",
"jax",
"bert",
"fill-mask",
"ar",
"arxiv:2103.06678",
"transformers",
"license:apache-2.0",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"BertForMaskedLM"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 12 | null | ---
tags:
- generated_from_trainer
model-index:
- name: chemical-bert-uncased-finetuned-cust
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# chemical-bert-uncased-finetuned-cust
This model is a fine-tuned version of [recobo/chemical-bert-uncased](https://huggingface.co/recobo/chemical-bert-uncased) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.7104
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 64
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 200
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 3.5876 | 1.0 | 63 | 2.7997 |
| 2.7843 | 2.0 | 126 | 2.3734 |
| 2.418 | 3.0 | 189 | 2.1510 |
| 2.2247 | 4.0 | 252 | 1.9822 |
| 2.062 | 5.0 | 315 | 1.8463 |
| 1.9875 | 6.0 | 378 | 1.8293 |
| 1.9034 | 7.0 | 441 | 1.7666 |
| 1.7818 | 8.0 | 504 | 1.6783 |
| 1.7131 | 9.0 | 567 | 1.5754 |
| 1.6793 | 10.0 | 630 | 1.5480 |
| 1.5773 | 11.0 | 693 | 1.4568 |
| 1.5391 | 12.0 | 756 | 1.5101 |
| 1.5049 | 13.0 | 819 | 1.4340 |
| 1.4476 | 14.0 | 882 | 1.4046 |
| 1.4032 | 15.0 | 945 | 1.3593 |
| 1.395 | 16.0 | 1008 | 1.3689 |
| 1.3353 | 17.0 | 1071 | 1.3350 |
| 1.3122 | 18.0 | 1134 | 1.2863 |
| 1.3036 | 19.0 | 1197 | 1.3690 |
| 1.2644 | 20.0 | 1260 | 1.1904 |
| 1.222 | 21.0 | 1323 | 1.1986 |
| 1.2091 | 22.0 | 1386 | 1.1650 |
| 1.2007 | 23.0 | 1449 | 1.1949 |
| 1.1456 | 24.0 | 1512 | 1.1649 |
| 1.1426 | 25.0 | 1575 | 1.1498 |
| 1.0883 | 26.0 | 1638 | 1.1489 |
| 1.0915 | 27.0 | 1701 | 1.1179 |
| 1.0635 | 28.0 | 1764 | 1.0726 |
| 1.0899 | 29.0 | 1827 | 1.1107 |
| 1.0251 | 30.0 | 1890 | 1.0944 |
| 1.0387 | 31.0 | 1953 | 1.0488 |
| 1.0037 | 32.0 | 2016 | 1.0679 |
| 1.0101 | 33.0 | 2079 | 1.0272 |
| 0.9595 | 34.0 | 2142 | 1.0158 |
| 0.9661 | 35.0 | 2205 | 1.0316 |
| 0.9535 | 36.0 | 2268 | 1.0086 |
| 0.9269 | 37.0 | 2331 | 1.0221 |
| 0.9395 | 38.0 | 2394 | 0.9626 |
| 0.9105 | 39.0 | 2457 | 0.9903 |
| 0.8888 | 40.0 | 2520 | 0.9892 |
| 0.9316 | 41.0 | 2583 | 0.9786 |
| 0.8804 | 42.0 | 2646 | 0.9938 |
| 0.8589 | 43.0 | 2709 | 1.0105 |
| 0.8573 | 44.0 | 2772 | 0.9729 |
| 0.8566 | 45.0 | 2835 | 0.9972 |
| 0.8392 | 46.0 | 2898 | 1.0085 |
| 0.8363 | 47.0 | 2961 | 0.9336 |
| 0.8184 | 48.0 | 3024 | 0.9886 |
| 0.7964 | 49.0 | 3087 | 0.9661 |
| 0.8025 | 50.0 | 3150 | 0.8956 |
| 0.8156 | 51.0 | 3213 | 0.9415 |
| 0.7906 | 52.0 | 3276 | 0.9381 |
| 0.7783 | 53.0 | 3339 | 0.9445 |
| 0.7696 | 54.0 | 3402 | 0.8859 |
| 0.763 | 55.0 | 3465 | 0.8851 |
| 0.7638 | 56.0 | 3528 | 0.9128 |
| 0.7576 | 57.0 | 3591 | 0.8629 |
| 0.757 | 58.0 | 3654 | 0.8917 |
| 0.7232 | 59.0 | 3717 | 0.8956 |
| 0.7327 | 60.0 | 3780 | 0.8727 |
| 0.7321 | 61.0 | 3843 | 0.8558 |
| 0.7131 | 62.0 | 3906 | 0.8876 |
| 0.696 | 63.0 | 3969 | 0.8872 |
| 0.6996 | 64.0 | 4032 | 0.7758 |
| 0.6807 | 65.0 | 4095 | 0.8657 |
| 0.6899 | 66.0 | 4158 | 0.8813 |
| 0.6873 | 67.0 | 4221 | 0.8488 |
| 0.6681 | 68.0 | 4284 | 0.8865 |
| 0.6758 | 69.0 | 4347 | 0.8447 |
| 0.6626 | 70.0 | 4410 | 0.8421 |
| 0.6535 | 71.0 | 4473 | 0.8313 |
| 0.6505 | 72.0 | 4536 | 0.8636 |
| 0.6654 | 73.0 | 4599 | 0.8433 |
| 0.6363 | 74.0 | 4662 | 0.7666 |
| 0.6395 | 75.0 | 4725 | 0.8882 |
| 0.6206 | 76.0 | 4788 | 0.8409 |
| 0.6365 | 77.0 | 4851 | 0.8807 |
| 0.6325 | 78.0 | 4914 | 0.8012 |
| 0.6142 | 79.0 | 4977 | 0.7705 |
| 0.6108 | 80.0 | 5040 | 0.8270 |
| 0.62 | 81.0 | 5103 | 0.8552 |
| 0.6188 | 82.0 | 5166 | 0.8377 |
| 0.6024 | 83.0 | 5229 | 0.7985 |
| 0.631 | 84.0 | 5292 | 0.8352 |
| 0.5871 | 85.0 | 5355 | 0.8086 |
| 0.6014 | 86.0 | 5418 | 0.8129 |
| 0.5842 | 87.0 | 5481 | 0.8649 |
| 0.5837 | 88.0 | 5544 | 0.8269 |
| 0.5958 | 89.0 | 5607 | 0.8407 |
| 0.564 | 90.0 | 5670 | 0.7906 |
| 0.5748 | 91.0 | 5733 | 0.7393 |
| 0.5918 | 92.0 | 5796 | 0.8445 |
| 0.5682 | 93.0 | 5859 | 0.8073 |
| 0.5497 | 94.0 | 5922 | 0.8165 |
| 0.5606 | 95.0 | 5985 | 0.7638 |
| 0.5593 | 96.0 | 6048 | 0.7929 |
| 0.5556 | 97.0 | 6111 | 0.7991 |
| 0.5604 | 98.0 | 6174 | 0.7417 |
| 0.5503 | 99.0 | 6237 | 0.8070 |
| 0.5561 | 100.0 | 6300 | 0.7845 |
| 0.5344 | 101.0 | 6363 | 0.7933 |
| 0.5209 | 102.0 | 6426 | 0.7741 |
| 0.5337 | 103.0 | 6489 | 0.7760 |
| 0.5437 | 104.0 | 6552 | 0.7634 |
| 0.5165 | 105.0 | 6615 | 0.7543 |
| 0.5343 | 106.0 | 6678 | 0.7661 |
| 0.5155 | 107.0 | 6741 | 0.7953 |
| 0.512 | 108.0 | 6804 | 0.8253 |
| 0.5259 | 109.0 | 6867 | 0.7570 |
| 0.5045 | 110.0 | 6930 | 0.7977 |
| 0.5115 | 111.0 | 6993 | 0.7598 |
| 0.5134 | 112.0 | 7056 | 0.7680 |
| 0.5076 | 113.0 | 7119 | 0.7696 |
| 0.5126 | 114.0 | 7182 | 0.7451 |
| 0.4963 | 115.0 | 7245 | 0.7923 |
| 0.5032 | 116.0 | 7308 | 0.7842 |
| 0.5137 | 117.0 | 7371 | 0.7239 |
| 0.488 | 118.0 | 7434 | 0.8188 |
| 0.4938 | 119.0 | 7497 | 0.7479 |
| 0.4866 | 120.0 | 7560 | 0.7761 |
| 0.4901 | 121.0 | 7623 | 0.7930 |
| 0.4877 | 122.0 | 7686 | 0.7733 |
| 0.4858 | 123.0 | 7749 | 0.7492 |
| 0.4813 | 124.0 | 7812 | 0.7645 |
| 0.4817 | 125.0 | 7875 | 0.7938 |
| 0.4822 | 126.0 | 7938 | 0.7253 |
| 0.4771 | 127.0 | 8001 | 0.7481 |
| 0.4769 | 128.0 | 8064 | 0.7402 |
| 0.4666 | 129.0 | 8127 | 0.7993 |
| 0.474 | 130.0 | 8190 | 0.7653 |
| 0.4718 | 131.0 | 8253 | 0.7524 |
| 0.4682 | 132.0 | 8316 | 0.7129 |
| 0.4698 | 133.0 | 8379 | 0.7806 |
| 0.4669 | 134.0 | 8442 | 0.7237 |
| 0.4401 | 135.0 | 8505 | 0.7185 |
| 0.4656 | 136.0 | 8568 | 0.7542 |
| 0.4569 | 137.0 | 8631 | 0.7412 |
| 0.4751 | 138.0 | 8694 | 0.7740 |
| 0.4474 | 139.0 | 8757 | 0.7636 |
| 0.4652 | 140.0 | 8820 | 0.7958 |
| 0.4539 | 141.0 | 8883 | 0.7410 |
| 0.4452 | 142.0 | 8946 | 0.7652 |
| 0.4516 | 143.0 | 9009 | 0.7337 |
| 0.4423 | 144.0 | 9072 | 0.7601 |
| 0.4542 | 145.0 | 9135 | 0.7692 |
| 0.4328 | 146.0 | 9198 | 0.7528 |
| 0.4503 | 147.0 | 9261 | 0.7673 |
| 0.4416 | 148.0 | 9324 | 0.7193 |
| 0.447 | 149.0 | 9387 | 0.7517 |
| 0.4434 | 150.0 | 9450 | 0.7241 |
| 0.4374 | 151.0 | 9513 | 0.7281 |
| 0.4334 | 152.0 | 9576 | 0.7150 |
| 0.4209 | 153.0 | 9639 | 0.7531 |
| 0.4405 | 154.0 | 9702 | 0.7252 |
| 0.4384 | 155.0 | 9765 | 0.7367 |
| 0.4265 | 156.0 | 9828 | 0.7111 |
| 0.4386 | 157.0 | 9891 | 0.7215 |
| 0.4276 | 158.0 | 9954 | 0.7119 |
| 0.4289 | 159.0 | 10017 | 0.7587 |
| 0.4415 | 160.0 | 10080 | 0.7935 |
| 0.4315 | 161.0 | 10143 | 0.7574 |
| 0.4227 | 162.0 | 10206 | 0.7296 |
| 0.4352 | 163.0 | 10269 | 0.7145 |
| 0.4108 | 164.0 | 10332 | 0.7133 |
| 0.433 | 165.0 | 10395 | 0.7369 |
| 0.4336 | 166.0 | 10458 | 0.7471 |
| 0.4016 | 167.0 | 10521 | 0.7329 |
| 0.4164 | 168.0 | 10584 | 0.7331 |
| 0.4182 | 169.0 | 10647 | 0.7449 |
| 0.4136 | 170.0 | 10710 | 0.7365 |
| 0.4183 | 171.0 | 10773 | 0.7248 |
| 0.4225 | 172.0 | 10836 | 0.7346 |
| 0.4294 | 173.0 | 10899 | 0.7099 |
| 0.4113 | 174.0 | 10962 | 0.7264 |
| 0.4216 | 175.0 | 11025 | 0.6822 |
| 0.4208 | 176.0 | 11088 | 0.7198 |
| 0.407 | 177.0 | 11151 | 0.7266 |
| 0.4164 | 178.0 | 11214 | 0.7466 |
| 0.4112 | 179.0 | 11277 | 0.7409 |
| 0.4067 | 180.0 | 11340 | 0.7058 |
| 0.4297 | 181.0 | 11403 | 0.6918 |
| 0.4137 | 182.0 | 11466 | 0.7432 |
| 0.4102 | 183.0 | 11529 | 0.7272 |
| 0.4184 | 184.0 | 11592 | 0.7309 |
| 0.4049 | 185.0 | 11655 | 0.7215 |
| 0.4097 | 186.0 | 11718 | 0.7375 |
| 0.419 | 187.0 | 11781 | 0.7575 |
| 0.4122 | 188.0 | 11844 | 0.7481 |
| 0.4089 | 189.0 | 11907 | 0.7790 |
| 0.4094 | 190.0 | 11970 | 0.7547 |
| 0.4107 | 191.0 | 12033 | 0.7390 |
| 0.4044 | 192.0 | 12096 | 0.7472 |
| 0.4065 | 193.0 | 12159 | 0.7283 |
| 0.4172 | 194.0 | 12222 | 0.7112 |
| 0.4124 | 195.0 | 12285 | 0.7470 |
| 0.4026 | 196.0 | 12348 | 0.7067 |
| 0.4179 | 197.0 | 12411 | 0.7259 |
| 0.4027 | 198.0 | 12474 | 0.7328 |
| 0.4101 | 199.0 | 12537 | 0.6891 |
| 0.3969 | 200.0 | 12600 | 0.7104 |
### Framework versions
- Transformers 4.24.0
- Pytorch 1.12.1+cu113
- Datasets 2.6.1
- Tokenizers 0.13.2
|
CAMeL-Lab/bert-base-arabic-camelbert-msa-sentiment | [
"pytorch",
"tf",
"bert",
"text-classification",
"ar",
"arxiv:2103.06678",
"transformers",
"license:apache-2.0"
]
| text-classification | {
"architectures": [
"BertForSequenceClassification"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 574 | null | ---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: wav2vec2-large-xls-r-1b-korean-convsen1
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-large-xls-r-1b-korean-convsen1
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-1b](https://huggingface.co/facebook/wav2vec2-xls-r-1b) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0014
- Cer: 0.0002
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 4
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1000
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss | Cer |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 1.3161 | 1.0 | 1762 | 0.1495 | 0.0443 |
| 0.1188 | 2.0 | 3524 | 0.0125 | 0.0033 |
| 0.0399 | 3.0 | 5286 | 0.0014 | 0.0002 |
### Framework versions
- Transformers 4.18.0
- Pytorch 1.13.0
- Datasets 2.6.1
- Tokenizers 0.11.0
|
CLAck/en-vi | [
"pytorch",
"marian",
"text2text-generation",
"en",
"vi",
"dataset:ALT",
"transformers",
"translation",
"license:apache-2.0",
"autotrain_compatible"
]
| translation | {
"architectures": [
"MarianMTModel"
],
"model_type": "marian",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 8 | 2022-11-09T01:44:38Z | ---
license: cc-by-nc-4.0
tags:
- generated_from_trainer
- video-classification
- videomae
- vision
metrics:
- accuracy
model-index:
- name: videomae-base-finetuned-ucf101-subset
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# videomae-base-finetuned-ucf101-subset
This model is a fine-tuned version of [MCG-NJU/videomae-base](https://huggingface.co/MCG-NJU/videomae-base) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.3992
- Accuracy: 0.8645
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- training_steps: 148
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 2.1374 | 0.26 | 38 | 1.7413 | 0.5714 |
| 0.7949 | 1.26 | 76 | 0.7747 | 0.8 |
| 0.4279 | 2.26 | 114 | 0.4053 | 0.9143 |
| 0.291 | 3.23 | 148 | 0.3429 | 0.9286 |
### Framework versions
- Transformers 4.24.0
- Pytorch 1.12.1+cu113
- Datasets 2.6.1
- Tokenizers 0.13.2
|
CLS/WubiBERT_models | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | 2022-11-09T02:08:51Z | ---
license: creativeml-openrail-m
tags:
- stable-diffusion
- text-to-image
---
# Guohua Diffusion
This is a fine-tuned Stable Diffusion model trained on traditional Chinese paintings.
Use **guohua style** in your prompts for the effect.
## Sample Image


## How to use
#### WebUI
Download `guohua.ckpt` from the model files.
#### Diffusers
This model can be used just like any other Stable Diffusion model. For more information,
please have a look at the [Stable Diffusion documentation](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion).
```python
#!pip install diffusers transformers scipy torch
from diffusers import StableDiffusionPipeline
import torch
model_id = "Langboat/Guohua-Diffusion"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipe = pipe.to("cuda")
prompt = "The Godfather poster in guohua style"
image = pipe(prompt).images[0]
image.save("./the_god_father.png")
```
|
CLTL/gm-ner-xlmrbase | [
"pytorch",
"tf",
"xlm-roberta",
"token-classification",
"nl",
"transformers",
"dighum",
"license:apache-2.0",
"autotrain_compatible"
]
| token-classification | {
"architectures": [
"XLMRobertaForTokenClassification"
],
"model_type": "xlm-roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 2 | 2022-11-09T02:13:23Z | ---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: bert-finetuned-ner
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# bert-finetuned-ner
This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the None dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| No log | 1.0 | 120 | 0.0053 | 0.8410 | 0.9372 | 0.8865 | 0.9991 |
### Framework versions
- Transformers 4.24.0
- Pytorch 1.12.1+cu113
- Datasets 2.6.1
- Tokenizers 0.13.2
|
CNT-UPenn/Bio_ClinicalBERT_for_seizureFreedom_classification | [
"pytorch",
"bert",
"text-classification",
"transformers"
]
| text-classification | {
"architectures": [
"BertForSequenceClassification"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 28 | 2022-11-09T02:57:58Z | ---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers
---
# {MODEL_NAME}
This is a [sentence-transformers](https://www.SBERT.net) model: it maps sentences & paragraphs to a 384-dimensional dense vector space and can be used for tasks like clustering or semantic search.
<!--- Describe your model here -->
## Usage (Sentence-Transformers)
Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:
```
pip install -U sentence-transformers
```
Then you can use the model like this:
```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]
model = SentenceTransformer('{MODEL_NAME}')
embeddings = model.encode(sentences)
print(embeddings)
```
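The embeddings can then be compared with cosine similarity for semantic search. Below is a short sketch (not part of the generated card) using the `util` helpers that ship with sentence-transformers; replace `{MODEL_NAME}` with the actual model id as above:
```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer('{MODEL_NAME}')

query_embedding = model.encode("How do I bake bread?", convert_to_tensor=True)
corpus_embeddings = model.encode(
    ["Bread is baked in a hot oven.", "The stock market fell today."],
    convert_to_tensor=True,
)

# Cosine similarity between the query and each corpus sentence
scores = util.cos_sim(query_embedding, corpus_embeddings)
print(scores)
```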
## Usage (HuggingFace Transformers)
Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: first, pass your input through the transformer model, then apply the right pooling operation on top of the contextualized word embeddings.
```python
from transformers import AutoTokenizer, AutoModel
import torch
#Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
token_embeddings = model_output[0] #First element of model_output contains all token embeddings
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']
# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}')
model = AutoModel.from_pretrained('{MODEL_NAME}')
# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
# Compute token embeddings
with torch.no_grad():
model_output = model(**encoded_input)
# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
print("Sentence embeddings:")
print(sentence_embeddings)
```
## Evaluation Results
<!--- Describe how your model was evaluated -->
For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME})
## Full Model Architecture
```
SentenceTransformer(
(0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: BertModel
(1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```
## Citing & Authors
<!--- Describe where people can find more information -->
|
CSResearcher/TestModel | [
"license:mit"
]
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
license: apache-2.0
tags:
- generated_from_keras_callback
model-index:
- name: toanbui1991/distilbert-base-uncased-finetuned-squad
results: []
---
<!-- This model card has been generated automatically according to the information Keras had access to. You should
probably proofread and complete it, then remove this comment. -->
# toanbui1991/distilbert-base-uncased-finetuned-squad
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset.
It achieves the following results on the evaluation set:
- Train Loss: 1.5101
- Train End Logits Accuracy: 0.6065
- Train Start Logits Accuracy: 0.5692
- Validation Loss: 1.1679
- Validation End Logits Accuracy: 0.6823
- Validation Start Logits Accuracy: 0.6523
- Epoch: 0
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- optimizer: {'name': 'Adam', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 11064, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False}
- training_precision: float32
### Training results
| Train Loss | Train End Logits Accuracy | Train Start Logits Accuracy | Validation Loss | Validation End Logits Accuracy | Validation Start Logits Accuracy | Epoch |
|:----------:|:-------------------------:|:---------------------------:|:---------------:|:------------------------------:|:--------------------------------:|:-----:|
| 1.5101 | 0.6065 | 0.5692 | 1.1679 | 0.6823 | 0.6523 | 0 |
### Framework versions
- Transformers 4.24.0
- TensorFlow 2.10.0
- Datasets 2.6.1
- Tokenizers 0.13.2
|
Cameron/BERT-mdgender-convai-ternary | [
"pytorch",
"jax",
"bert",
"text-classification",
"transformers"
]
| text-classification | {
"architectures": [
"BertForSequenceClassification"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 38 | null | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- conll2003
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: bert-cased-ner-fcit499
results:
- task:
name: Token Classification
type: token-classification
dataset:
name: conll2003
type: conll2003
config: conll2003
split: train
args: conll2003
metrics:
- name: Precision
type: precision
value: 0.9417409184372858
- name: Recall
type: recall
value: 0.950207468879668
- name: F1
type: f1
value: 0.9459552495697073
- name: Accuracy
type: accuracy
value: 0.9905416329830234
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# bert-cased-ner-fcit499
This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the conll2003 dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0404
- Precision: 0.9417
- Recall: 0.9502
- F1: 0.9460
- Accuracy: 0.9905
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 64
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5
### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| No log | 1.0 | 157 | 0.0578 | 0.8782 | 0.8976 | 0.8878 | 0.9825 |
| No log | 2.0 | 314 | 0.0425 | 0.9317 | 0.9343 | 0.9330 | 0.9885 |
| No log | 3.0 | 471 | 0.0391 | 0.9381 | 0.9433 | 0.9407 | 0.9897 |
| 0.1097 | 4.0 | 628 | 0.0397 | 0.9377 | 0.9467 | 0.9422 | 0.9900 |
| 0.1097 | 5.0 | 785 | 0.0404 | 0.9417 | 0.9502 | 0.9460 | 0.9905 |
### Framework versions
- Transformers 4.24.0
- Pytorch 1.12.1+cu113
- Datasets 2.6.1
- Tokenizers 0.13.2
|
Cameron/BERT-rtgender-opgender-annotations | [
"pytorch",
"jax",
"bert",
"text-classification",
"transformers"
]
| text-classification | {
"architectures": [
"BertForSequenceClassification"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 33 | null | ---
license: apache-2.0
tags:
- generated_from_keras_callback
model-index:
- name: whisper3_0020
results: []
---
<!-- This model card has been generated automatically according to the information Keras had access to. You should
probably proofread and complete it, then remove this comment. -->
# whisper3_0020
This model is a fine-tuned version of [openai/whisper-tiny](https://huggingface.co/openai/whisper-tiny) on an unknown dataset.
It achieves the following results on the evaluation set:
- Train Loss: 0.1844
- Train Accuracy: 0.0334
- Validation Loss: 0.5619
- Validation Accuracy: 0.0313
- Epoch: 19
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- optimizer: {'name': 'AdamWeightDecay', 'learning_rate': 1e-05, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False, 'weight_decay_rate': 0.01}
- training_precision: float32
### Training results
| Train Loss | Train Accuracy | Validation Loss | Validation Accuracy | Epoch |
|:----------:|:--------------:|:---------------:|:-------------------:|:-----:|
| 5.0832 | 0.0116 | 4.4298 | 0.0124 | 0 |
| 4.3130 | 0.0131 | 4.0733 | 0.0141 | 1 |
| 3.9211 | 0.0146 | 3.6762 | 0.0157 | 2 |
| 3.5505 | 0.0159 | 3.3453 | 0.0171 | 3 |
| 3.1592 | 0.0175 | 2.8062 | 0.0199 | 4 |
| 2.2581 | 0.0220 | 1.7622 | 0.0252 | 5 |
| 1.4671 | 0.0259 | 1.2711 | 0.0276 | 6 |
| 1.0779 | 0.0278 | 1.0220 | 0.0288 | 7 |
| 0.8591 | 0.0290 | 0.8836 | 0.0295 | 8 |
| 0.7159 | 0.0297 | 0.7918 | 0.0300 | 9 |
| 0.6105 | 0.0304 | 0.7276 | 0.0303 | 10 |
| 0.5287 | 0.0309 | 0.6850 | 0.0306 | 11 |
| 0.4614 | 0.0313 | 0.6472 | 0.0308 | 12 |
| 0.4049 | 0.0317 | 0.6199 | 0.0310 | 13 |
| 0.3562 | 0.0320 | 0.6019 | 0.0311 | 14 |
| 0.3139 | 0.0324 | 0.5868 | 0.0311 | 15 |
| 0.2766 | 0.0326 | 0.5751 | 0.0312 | 16 |
| 0.2438 | 0.0329 | 0.5701 | 0.0312 | 17 |
| 0.2116 | 0.0332 | 0.5686 | 0.0313 | 18 |
| 0.1844 | 0.0334 | 0.5619 | 0.0313 | 19 |
### Framework versions
- Transformers 4.25.0.dev0
- TensorFlow 2.9.2
- Datasets 2.6.1
- Tokenizers 0.13.2
|
dccuchile/albert-base-spanish-finetuned-pawsx | [
"pytorch",
"albert",
"text-classification",
"transformers"
]
| text-classification | {
"architectures": [
"AlbertForSequenceClassification"
],
"model_type": "albert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 25 | null | ---
tags:
- generated_from_trainer
model-index:
- name: pz-bert-kr
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# pz-bert-kr
This model is a fine-tuned version of [Hanwoon/pz-bert-kr](https://huggingface.co/Hanwoon/bert-kor-base-pz-language-test) on multiple datasets.
It achieves the following results on the evaluation set:
- Loss: 2.6540
## Model description
A Korean-language BERT model.
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 8
- total_train_batch_size: 128
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 20
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:------:|:---------------:|
| 2.7509 | 1.0 | 10546 | 2.7836 |
| 2.7325 | 2.0 | 21092 | 2.7776 |
| 2.6884 | 3.0 | 31638 | 2.7732 |
| 2.6839 | 4.0 | 42184 | 2.7663 |
| 2.655 | 5.0 | 52730 | 2.7548 |
| 2.6475 | 6.0 | 63276 | 2.7388 |
| 2.6172 | 7.0 | 73822 | 2.7406 |
| 2.6177 | 8.0 | 84368 | 2.7320 |
| 2.5885 | 9.0 | 94914 | 2.7121 |
| 2.5743 | 10.0 | 105460 | 2.7156 |
| 2.5652 | 11.0 | 116006 | 2.7047 |
| 2.5642 | 12.0 | 126552 | 2.6916 |
| 2.5644 | 13.0 | 137098 | 2.7033 |
| 2.5136 | 14.0 | 147644 | 2.6833 |
| 2.532 | 15.0 | 158190 | 2.6742 |
| 2.5224 | 16.0 | 168736 | 2.6702 |
| 2.5268 | 17.0 | 179282 | 2.6661 |
| 2.5077 | 18.0 | 189828 | 2.6629 |
| 2.5061 | 19.0 | 200374 | 2.6657 |
| 2.4853 | 20.0 | 210920 | 2.6540 |
### Framework versions
- Transformers 4.23.1
- Pytorch 1.12.1
- Datasets 2.6.1
- Tokenizers 0.13.1
|
dccuchile/albert-base-spanish-finetuned-pos | [
"pytorch",
"albert",
"token-classification",
"transformers",
"autotrain_compatible"
]
| token-classification | {
"architectures": [
"AlbertForTokenClassification"
],
"model_type": "albert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 5 | null | ---
language: en
license: apache-2.0
library_name: diffusers
tags: []
datasets: huggan/smithsonian_butterflies_subset
metrics: []
---
<!-- This model card has been generated automatically according to the information the training script had access to. You
should probably proofread and complete it, then remove this comment. -->
# ddpm-butterflies-128
## Model description
This diffusion model is trained with the [🤗 Diffusers](https://github.com/huggingface/diffusers) library
on the `huggan/smithsonian_butterflies_subset` dataset.
## Intended uses & limitations
#### How to use
```python
# TODO: add an example code snippet for running this diffusion pipeline
```
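Until the snippet above is filled in, the following is a minimal sketch of how a DDPM checkpoint like this one is typically sampled with 🤗 Diffusers; the repo id is taken from the TensorBoard link at the bottom of this card.
```python
from diffusers import DDPMPipeline

# load the trained pipeline (repo id from the TensorBoard link below)
pipeline = DDPMPipeline.from_pretrained("Chalet37/ddpm-butterflies-128")

# sample one 128x128 butterfly image and save it
image = pipeline().images[0]
image.save("ddpm_butterfly.png")
```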
#### Limitations and bias
[TODO: provide examples of latent issues and potential remediations]
## Training data
[TODO: describe the data used to train the model]
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 16
- eval_batch_size: 16
- gradient_accumulation_steps: 1
- optimizer: AdamW with betas=(None, None), weight_decay=None and epsilon=None
- lr_scheduler: None
- lr_warmup_steps: 500
- ema_inv_gamma: None
- mixed_precision: fp16
### Training results
📈 [TensorBoard logs](https://huggingface.co/Chalet37/ddpm-butterflies-128/tensorboard?#scalars)
|
dccuchile/albert-base-spanish-finetuned-qa-mlqa | [
"pytorch",
"albert",
"question-answering",
"transformers",
"autotrain_compatible"
]
| question-answering | {
"architectures": [
"AlbertForQuestionAnswering"
],
"model_type": "albert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 3 | 2022-11-09T06:41:27Z | ---
license: mit
tags:
- generated_from_trainer
model-index:
- name: GPT2-LM-Finetuned-MBTI
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# GPT2-LM-Finetuned-MBTI
This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 3.9582
- Lm loss: 3.9581
- Perplexity: 52.36
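For reference, the reported perplexity is simply the exponential of the LM loss: exp(3.9581) ≈ 52.36.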
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 10
### Training results
| Training Loss | Epoch | Step | Validation Loss | Lm loss | Perplexity |
|:-------------:|:-----:|:-----:|:---------------:|:-------:|:----------:|
| 4.1981 | 1.0 | 3470 | 4.0349 | 4.0348 | 56.53 |
| 4.0457 | 2.0 | 6940 | 3.9963 | 3.9962 | 54.39 |
| 3.9757 | 3.0 | 10410 | 3.9815 | 3.9814 | 53.59 |
| 3.9247 | 4.0 | 13880 | 3.9701 | 3.9701 | 52.99 |
| 3.885 | 5.0 | 17350 | 3.9614 | 3.9613 | 52.52 |
| 3.8523 | 6.0 | 20820 | 3.9627 | 3.9627 | 52.60 |
| 3.8274 | 7.0 | 24290 | 3.9607 | 3.9606 | 52.49 |
| 3.8076 | 8.0 | 27760 | 3.9585 | 3.9584 | 52.37 |
| 3.7924 | 9.0 | 31230 | 3.9576 | 3.9575 | 52.33 |
| 3.782 | 10.0 | 34700 | 3.9582 | 3.9581 | 52.36 |
### Framework versions
- Transformers 4.21.2
- Pytorch 1.12.1
- Datasets 2.4.0
- Tokenizers 0.12.1
|
dccuchile/albert-base-spanish-finetuned-xnli | [
"pytorch",
"albert",
"text-classification",
"transformers"
]
| text-classification | {
"architectures": [
"AlbertForSequenceClassification"
],
"model_type": "albert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 28 | null | ---
language: en
thumbnail: http://www.huggingtweets.com/dailystoic-thestoicemperor-thetweetofgod/1667978138895/predictions.png
tags:
- huggingtweets
widget:
- text: "My dream is"
---
<div class="inline-flex flex-col" style="line-height: 1.5;">
<div class="flex">
<div
style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('https://pbs.twimg.com/profile_images/1272553610434338816/-pN7JIO6_400x400.jpg')">
</div>
<div
style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('https://pbs.twimg.com/profile_images/1513636967090917378/u3n2blUC_400x400.jpg')">
</div>
<div
style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('https://pbs.twimg.com/profile_images/851774550631104514/FnBLKlzZ_400x400.jpg')">
</div>
</div>
<div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI CYBORG 🤖</div>
<div style="text-align: center; font-size: 16px; font-weight: 800">Daily Stoic & God (Thee/Thy) & The Stoic Emperor</div>
<div style="text-align: center; font-size: 14px;">@dailystoic-thestoicemperor-thetweetofgod</div>
</div>
I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets).
Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)!
## How does it work?
The model uses the following pipeline.

To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI).
## Training data
The model was trained on tweets from Daily Stoic & God (Thee/Thy) & The Stoic Emperor.
| Data | Daily Stoic | God (Thee/Thy) | The Stoic Emperor |
| --- | --- | --- | --- |
| Tweets downloaded | 3250 | 3241 | 1431 |
| Retweets | 87 | 109 | 7 |
| Short tweets | 34 | 99 | 40 |
| Tweets kept | 3129 | 3033 | 1384 |
[Explore the data](https://wandb.ai/wandb/huggingtweets/runs/1ho61rre/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.
## Training procedure
The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @dailystoic-thestoicemperor-thetweetofgod's tweets.
Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/3uv3jslg) for full transparency and reproducibility.
At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/3uv3jslg/artifacts) is logged and versioned.
## How to use
You can use this model directly with a pipeline for text generation:
```python
from transformers import pipeline
generator = pipeline('text-generation',
model='huggingtweets/dailystoic-thestoicemperor-thetweetofgod')
generator("My dream is", num_return_sequences=5)
```
## Limitations and bias
The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias).
In addition, the data present in the user's tweets further affects the text generated by the model.
## About
*Built by Boris Dayma*
[](https://twitter.com/intent/follow?screen_name=borisdayma)
For more details, visit the project repository.
[](https://github.com/borisdayma/huggingtweets)
|
dccuchile/albert-large-spanish-finetuned-mldoc | [
"pytorch",
"albert",
"text-classification",
"transformers"
]
| text-classification | {
"architectures": [
"AlbertForSequenceClassification"
],
"model_type": "albert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 27 | null | ---
language: en
thumbnail: http://www.huggingtweets.com/mumukshusavitri/1667977046540/predictions.png
tags:
- huggingtweets
widget:
- text: "My dream is"
---
<div class="inline-flex flex-col" style="line-height: 1.5;">
<div class="flex">
<div
style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('https://pbs.twimg.com/profile_images/1588132608243773441/zuQl_2d7_400x400.jpg')">
</div>
<div
style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('')">
</div>
<div
style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('')">
</div>
</div>
<div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div>
<div style="text-align: center; font-size: 16px; font-weight: 800">Savitri Mumukshu - सावित्री मुमुक्षु</div>
<div style="text-align: center; font-size: 14px;">@mumukshusavitri</div>
</div>
I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets).
Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)!
## How does it work?
The model uses the following pipeline.

To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI).
## Training data
The model was trained on tweets from Savitri Mumukshu - सावित्री मुमुक्षु.
| Data | Savitri Mumukshu - सावित्री मुमुक्षु |
| --- | --- |
| Tweets downloaded | 3238 |
| Retweets | 123 |
| Short tweets | 640 |
| Tweets kept | 2475 |
[Explore the data](https://wandb.ai/wandb/huggingtweets/runs/21w2o0rg/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.
## Training procedure
The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @mumukshusavitri's tweets.
Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/2m3kx4jk) for full transparency and reproducibility.
At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/2m3kx4jk/artifacts) is logged and versioned.
## How to use
You can use this model directly with a pipeline for text generation:
```python
from transformers import pipeline
generator = pipeline('text-generation',
model='huggingtweets/mumukshusavitri')
generator("My dream is", num_return_sequences=5)
```
## Limitations and bias
The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias).
In addition, the data present in the user's tweets further affects the text generated by the model.
## About
*Built by Boris Dayma*
[](https://twitter.com/intent/follow?screen_name=borisdayma)
For more details, visit the project repository.
[](https://github.com/borisdayma/huggingtweets)
|
dccuchile/albert-large-spanish-finetuned-ner | [
"pytorch",
"albert",
"token-classification",
"transformers",
"autotrain_compatible"
]
| token-classification | {
"architectures": [
"AlbertForTokenClassification"
],
"model_type": "albert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 3 | null | ---
tags:
- generated_from_trainer
model-index:
- name: GPT2-CLS-Finetuned-MBTI
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# GPT2-CLS-Finetuned-MBTI
This model was trained from scratch on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 1.9571
- Cls loss: 1.9559
- Cls Accuracy: 0.6052
- Cls F1: 0.5956
- Cls Precision: 0.6180
- Cls Recall: 0.6052
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Cls loss | Cls Accuracy | Cls F1 | Cls Precision | Cls Recall | Validation Loss |
|:-------------:|:-----:|:-----:|:--------:|:------------:|:------:|:-------------:|:----------:|:---------------:|
| 2.0239 | 1.0 | 3470 | 1.7000 | 0.5262 | 0.4961 | 0.5438 | 0.5262 | 1.6998 |
| 1.5182 | 2.0 | 6940 | 1.8171 | 0.5873 | 0.5764 | 0.5971 | 0.5873 | 1.8181 |
| 1.3241 | 3.0 | 10410 | 1.9559 | 0.6052 | 0.5956 | 0.6180 | 0.6052 | 1.9571 |
### Framework versions
- Transformers 4.21.2
- Pytorch 1.12.1
- Datasets 2.4.0
- Tokenizers 0.12.1
|
dccuchile/albert-tiny-spanish-finetuned-ner | [
"pytorch",
"albert",
"token-classification",
"transformers",
"autotrain_compatible"
]
| token-classification | {
"architectures": [
"AlbertForTokenClassification"
],
"model_type": "albert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 8 | null | ---
language:
- en
tags:
- align
- clip
license: apache-2.0
datasets:
- kakaobrain/coyo-700m
inference: false
---
# Model Details
This is an unofficial implementation of [ALIGN](https://arxiv.org/abs/2102.05918) trained on [COYO-700M](https://github.com/kakaobrain/coyo-dataset). The official ALIGN is trained on its own dataset of 1.8B samples, which has not been released to the public. Instead, we trained our implementation of the ALIGN model on [COYO-700M](https://github.com/kakaobrain/coyo-dataset).
It was developed by Kakao Brain to validate the performance of the COYO-700M dataset on a large-scale model.
The training took about 8 days on TPU V3-512.
## Model Date
April 2022
## Model Type
This is a dual-encoder model where
- the image encoder uses the EfficientNet-B7 architecture
- the text encoder uses the BERT-base architecture (a schematic scoring sketch follows below)
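Since this card ships without an inference example, here is a minimal, schematic sketch of how a dual-encoder model of this kind scores image-text pairs. The linear projections, feature dimensions, and embedding size below are placeholders for illustration only, not the actual COYO-ALIGN code.
```python
import torch
import torch.nn.functional as F

EMBED_DIM = 640  # hypothetical shared embedding size

# stand-ins for the EfficientNet-B7 image tower and BERT-base text tower
image_proj = torch.nn.Linear(2560, EMBED_DIM)  # 2560 = EfficientNet-B7 feature size
text_proj = torch.nn.Linear(768, EMBED_DIM)    # 768 = BERT-base hidden size

image_feats = torch.randn(4, 2560)  # dummy batch of image features
text_feats = torch.randn(4, 768)    # dummy batch of text features

# project into the shared space and L2-normalize, as in CLIP/ALIGN-style models
image_emb = F.normalize(image_proj(image_feats), dim=-1)
text_emb = F.normalize(text_proj(text_feats), dim=-1)

# cosine-similarity matrix; matched pairs sit on the diagonal
similarity = image_emb @ text_emb.t()
print(similarity.shape)  # torch.Size([4, 4])
```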
# Training data
This model is trained on [COYO-700M](https://github.com/kakaobrain/coyo-dataset) dataset.
# Evaluation results
| Model | Dataset | ImageNet KNN | Flickr30k I2T R@1 | Flickr30k T2I R@1 | MsCOCO I2T R@1 | MsCOCO T2I R@1 |
|----------------------------------|:----------:|:------------:|:-----------------:|:-----------------:|:--------------:|:--------------:|
| ALIGN-L2-Large (Google) | ALIGN 1.8B | 76.4 | 88.6 | 75.7 | 58.6 | 45.6 |
| ALIGN-B7-Base (Google) | ALIGN 1.8B | 69.3 | - | - | 55.4 | 41.7 |
| COYO-ALIGN-B7-Base (Kakao Brain) | COYO-700M | 68.6 | 88.1 | 73.2 | 61.2 | 43.1 |
|
dccuchile/albert-base-spanish | [
"pytorch",
"tf",
"albert",
"pretraining",
"es",
"dataset:large_spanish_corpus",
"transformers",
"spanish",
"OpenCENIA"
]
| null | {
"architectures": [
"AlbertForPreTraining"
],
"model_type": "albert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 586 | null | ---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
---
# {MODEL_NAME}
This is a [sentence-transformers](https://www.SBERT.net) model: it maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for tasks like clustering or semantic search.
<!--- Describe your model here -->
## Usage (Sentence-Transformers)
Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:
```
pip install -U sentence-transformers
```
Then you can use the model like this:
```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]
model = SentenceTransformer('{MODEL_NAME}')
embeddings = model.encode(sentences)
print(embeddings)
```
## Evaluation Results
<!--- Describe how your model was evaluated -->
For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME})
## Training
The model was trained with the parameters:
**DataLoader**:
`torch.utils.data.dataloader.DataLoader` of length 160 with parameters:
```
{'batch_size': 20, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```
**Loss**:
`sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss`
Parameters of the fit()-Method:
```
{
"epochs": 1,
"evaluation_steps": 0,
"evaluator": "NoneType",
"max_grad_norm": 1,
"optimizer_class": "<class 'torch.optim.adamw.AdamW'>",
"optimizer_params": {
"lr": 2e-05
},
"scheduler": "WarmupLinear",
"steps_per_epoch": 160,
"warmup_steps": 16,
"weight_decay": 0.01
}
```
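For context, the parameters listed above roughly correspond to a sentence-transformers training loop like the sketch below. The sentence pairs, similarity labels, and base checkpoint are placeholders for illustration; the card does not state which model the fine-tuning started from.
```python
from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer, InputExample, losses

# dummy sentence pairs with similarity labels in [0, 1]
train_examples = [
    InputExample(texts=["A man is eating food.", "A man is eating a meal."], label=0.9),
    InputExample(texts=["A man is eating food.", "A plane is taking off."], label=0.1),
]

# placeholder base checkpoint; the actual starting model is not stated in this card
model = SentenceTransformer("sentence-transformers/all-mpnet-base-v2")

train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=20)
train_loss = losses.CosineSimilarityLoss(model)

# mirrors the fit() parameters documented above
model.fit(
    train_objectives=[(train_dataloader, train_loss)],
    epochs=1,
    warmup_steps=16,
    optimizer_params={"lr": 2e-05},
    weight_decay=0.01,
)
```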
## Full Model Architecture
```
SentenceTransformer(
(0): Transformer({'max_seq_length': 384, 'do_lower_case': False}) with Transformer model: MPNetModel
(1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
(2): Normalize()
)
```
## Citing & Authors
<!--- Describe where people can find more information --> |
dccuchile/bert-base-spanish-wwm-cased-finetuned-pos | [
"pytorch",
"bert",
"token-classification",
"transformers",
"autotrain_compatible"
]
| token-classification | {
"architectures": [
"BertForTokenClassification"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 1 | null | ---
license: apache-2.0
---
# Chinese-CLIP-RN50
## Introduction
This is the smallest model of the Chinese CLIP series, with ResNet-50 as the image encoder and RBT3 as the text encoder. Chinese CLIP is a simple implementation of CLIP on a large-scale dataset of around 200 million Chinese image-text pairs. For more details, please refer to our technical report https://arxiv.org/abs/2211.01335 and our official github repo https://github.com/OFA-Sys/Chinese-CLIP
## Use with the official API
We provide a simple code snippet to show how to use the API for Chinese-CLIP. For starters, please install cn_clip:
```bash
# to install the latest stable release
pip install cn_clip
# or install from source code
cd Chinese-CLIP
pip install -e .
```
After installation, use Chinese CLIP as shown below:
```python
import torch
from PIL import Image
import cn_clip.clip as clip
from cn_clip.clip import load_from_name, available_models
print("Available models:", available_models())
# Available models: ['ViT-B-16', 'ViT-L-14', 'ViT-L-14-336', 'ViT-H-14', 'RN50']
device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = load_from_name("RN50", device=device, download_root='./')
model.eval()
image = preprocess(Image.open("examples/pokemon.jpeg")).unsqueeze(0).to(device)
text = clip.tokenize(["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"]).to(device)
with torch.no_grad():
image_features = model.encode_image(image)
text_features = model.encode_text(text)
# Normalize the features. Please use the normalized features for downstream tasks.
image_features /= image_features.norm(dim=-1, keepdim=True)
text_features /= text_features.norm(dim=-1, keepdim=True)
logits_per_image, logits_per_text = model.get_similarity(image, text)
probs = logits_per_image.softmax(dim=-1).cpu().numpy()
print("Label probs:", probs) # [[1.268734e-03 5.436878e-02 6.795761e-04 9.436829e-01]]
```
However, if you are not satisfied with only using the API, feel free to check our github repo https://github.com/OFA-Sys/Chinese-CLIP for more details about training and inference.
<br><br>
## Results
**MUGE Text-to-Image Retrieval**:
<table border="1" width="100%">
<tr align="center">
<th>Setup</th><th colspan="4">Zero-shot</th><th colspan="4">Finetune</th>
</tr>
<tr align="center">
<td>Metric</td><td>R@1</td><td>R@5</td><td>R@10</td><td>MR</td><td>R@1</td><td>R@5</td><td>R@10</td><td>MR</td>
</tr>
<tr align="center">
<td width="120%">Wukong</td><td>42.7</td><td>69.0</td><td>78.0</td><td>63.2</td><td>52.7</td><td>77.9</td><td>85.6</td><td>72.1</td>
</tr>
<tr align="center">
<td width="120%">R2D2</td><td>49.5</td><td>75.7</td><td>83.2</td><td>69.5</td><td>60.1</td><td>82.9</td><td>89.4</td><td>77.5</td>
</tr>
<tr align="center">
<td width="120%">CN-CLIP</td><td>63.0</td><td>84.1</td><td>89.2</td><td>78.8</td><td>68.9</td><td>88.7</td><td>93.1</td><td>83.6</td>
</tr>
</table>
<br>
**Flickr30K-CN Retrieval**:
<table border="1" width="120%">
<tr align="center">
<th>Task</th><th colspan="6">Text-to-Image</th><th colspan="6">Image-to-Text</th>
</tr>
<tr align="center">
<th>Setup</th><th colspan="3">Zero-shot</th><th colspan="3">Finetune</th><th colspan="3">Zero-shot</th><th colspan="3">Finetune</th>
</tr>
<tr align="center">
<td>Metric</td><td>R@1</td><td>R@5</td><td>R@10</td><td>R@1</td><td>R@5</td><td>R@10</td><td>R@1</td><td>R@5</td><td>R@10</td><td>R@1</td><td>R@5</td><td>R@10</td>
</tr>
<tr align="center">
<td width="120%">Wukong</td><td>51.7</td><td>78.9</td><td>86.3</td><td>77.4</td><td>94.5</td><td>97.0</td><td>76.1</td><td>94.8</td><td>97.5</td><td>92.7</td><td>99.1</td><td>99.6</td>
</tr>
<tr align="center">
<td width="120%">R2D2</td><td>60.9</td><td>86.8</td><td>92.7</td><td>84.4</td><td>96.7</td><td>98.4</td><td>77.6</td><td>96.7</td><td>98.9</td><td>95.6</td><td>99.8</td><td>100.0</td>
</tr>
<tr align="center">
<td width="120%">CN-CLIP</td><td>71.2</td><td>91.4</td><td>95.5</td><td>83.8</td><td>96.9</td><td>98.6</td><td>81.6</td><td>97.5</td><td>98.8</td><td>95.3</td><td>99.7</td><td>100.0</td>
</tr>
</table>
<br>
**COCO-CN Retrieval**:
<table border="1" width="100%">
<tr align="center">
<th>Task</th><th colspan="6">Text-to-Image</th><th colspan="6">Image-to-Text</th>
</tr>
<tr align="center">
<th>Setup</th><th colspan="3">Zero-shot</th><th colspan="3">Finetune</th><th colspan="3">Zero-shot</th><th colspan="3">Finetune</th>
</tr>
<tr align="center">
<td>Metric</td><td>R@1</td><td>R@5</td><td>R@10</td><td>R@1</td><td>R@5</td><td>R@10</td><td>R@1</td><td>R@5</td><td>R@10</td><td>R@1</td><td>R@5</td><td>R@10</td>
</tr>
<tr align="center">
<td width="120%">Wukong</td><td>53.4</td><td>80.2</td><td>90.1</td><td>74.0</td><td>94.4</td><td>98.1</td><td>55.2</td><td>81.0</td><td>90.6</td><td>73.3</td><td>94.0</td><td>98.0</td>
</tr>
<tr align="center">
<td width="120%">R2D2</td><td>56.4</td><td>85.0</td><td>93.1</td><td>79.1</td><td>96.5</td><td>98.9</td><td>63.3</td><td>89.3</td><td>95.7</td><td>79.3</td><td>97.1</td><td>98.7</td>
</tr>
<tr align="center">
<td width="120%">CN-CLIP</td><td>69.2</td><td>89.9</td><td>96.1</td><td>81.5</td><td>96.9</td><td>99.1</td><td>63.0</td><td>86.6</td><td>92.9</td><td>83.5</td><td>97.3</td><td>99.2</td>
</tr>
</table>
<br>
**Zero-shot Image Classification**:
<table border="1" width="100%">
<tr align="center">
<th>Task</th><th>CIFAR10</th><th>CIFAR100</th><th>DTD</th><th>EuroSAT</th><th>FER</th><th>FGVC</th><th>KITTI</th><th>MNIST</th><th>PC</th><th>VOC</th>
</tr>
<tr align="center">
<td width="150%">GIT</td><td>88.5</td><td>61.1</td><td>42.9</td><td>43.4</td><td>41.4</td><td>6.7</td><td>22.1</td><td>68.9</td><td>50.0</td><td>80.2</td>
</tr>
<tr align="center">
<td width="150%">ALIGN</td><td>94.9</td><td>76.8</td><td>66.1</td><td>52.1</td><td>50.8</td><td>25.0</td><td>41.2</td><td>74.0</td><td>55.2</td><td>83.0</td>
</tr>
<tr align="center">
<td width="150%">CLIP</td><td>94.9</td><td>77.0</td><td>56.0</td><td>63.0</td><td>48.3</td><td>33.3</td><td>11.5</td><td>79.0</td><td>62.3</td><td>84.0</td>
</tr>
<tr align="center">
<td width="150%">Wukong</td><td>95.4</td><td>77.1</td><td>40.9</td><td>50.3</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td>
</tr>
<tr align="center">
<td width="150%">CN-CLIP</td><td>96.0</td><td>79.7</td><td>51.2</td><td>52.0</td><td>55.1</td><td>26.2</td><td>49.9</td><td>79.4</td><td>63.5</td><td>84.9</td>
</tr>
</table>
<br>
## Citation
If you find Chinese CLIP helpful, feel free to cite our paper. Thanks for your support!
```
@article{chinese-clip,
title={Chinese CLIP: Contrastive Vision-Language Pretraining in Chinese},
author={Yang, An and Pan, Junshu and Lin, Junyang and Men, Rui and Zhang, Yichang and Zhou, Jingren and Zhou, Chang},
journal={arXiv preprint arXiv:2211.01335},
year={2022}
}
```
<br> |
dccuchile/bert-base-spanish-wwm-uncased-finetuned-mldoc | [
"pytorch",
"bert",
"text-classification",
"transformers"
]
| text-classification | {
"architectures": [
"BertForSequenceClassification"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 39 | null | ---
tags:
- vision
widget:
- src: https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16/resolve/main/festival.jpg
candidate_labels: 灯笼, 鞭炮, 对联
example_title: festival
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/cat-dog-music.png
candidate_labels: 音乐表演, 体育运动
example_title: cat & dog
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/football-match.jpg
candidate_labels: 梅西, C罗, 马奎尔
example_title: football
---
# Chinese-CLIP-ViT-Large-Patch14
## Introduction
This is the large version of Chinese CLIP, with ViT-L/14 as the image encoder and RoBERTa-wwm-base as the text encoder. Chinese CLIP is a simple implementation of CLIP on a large-scale dataset of around 200 million Chinese image-text pairs. For more details, please refer to our technical report https://arxiv.org/abs/2211.01335 and our official github repo https://github.com/OFA-Sys/Chinese-CLIP (Welcome to star! 🔥🔥)
## Use with the official API
We provide a simple code snippet to show how to use the API of Chinese-CLIP to compute the image & text embeddings and similarities.
```python
from PIL import Image
import requests
from transformers import ChineseCLIPProcessor, ChineseCLIPModel
model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-large-patch14")
processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-large-patch14")
url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg"
image = Image.open(requests.get(url, stream=True).raw)
# Squirtle, Bulbasaur, Charmander, Pikachu in English
texts = ["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"]
# compute image feature
inputs = processor(images=image, return_tensors="pt")
image_features = model.get_image_features(**inputs)
image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True) # normalize
# compute text features
inputs = processor(text=texts, padding=True, return_tensors="pt")
text_features = model.get_text_features(**inputs)
text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True) # normalize
# compute image-text similarity scores
inputs = processor(text=texts, images=image, return_tensors="pt", padding=True)
outputs = model(**inputs)
logits_per_image = outputs.logits_per_image # this is the image-text similarity score
probs = logits_per_image.softmax(dim=1) # probs: [[0.0066, 0.0211, 0.0031, 0.9692]]
```
However, if you are not satisfied with only using the API, feel free to check our github repo https://github.com/OFA-Sys/Chinese-CLIP for more details about training and inference.
<br><br>
## Results
**MUGE Text-to-Image Retrieval**:
<table border="1" width="100%">
<tr align="center">
<th>Setup</th><th colspan="4">Zero-shot</th><th colspan="4">Finetune</th>
</tr>
<tr align="center">
<td>Metric</td><td>R@1</td><td>R@5</td><td>R@10</td><td>MR</td><td>R@1</td><td>R@5</td><td>R@10</td><td>MR</td>
</tr>
<tr align="center">
<td width="120%">Wukong</td><td>42.7</td><td>69.0</td><td>78.0</td><td>63.2</td><td>52.7</td><td>77.9</td><td>85.6</td><td>72.1</td>
</tr>
<tr align="center">
<td width="120%">R2D2</td><td>49.5</td><td>75.7</td><td>83.2</td><td>69.5</td><td>60.1</td><td>82.9</td><td>89.4</td><td>77.5</td>
</tr>
<tr align="center">
<td width="120%">CN-CLIP</td><td>63.0</td><td>84.1</td><td>89.2</td><td>78.8</td><td>68.9</td><td>88.7</td><td>93.1</td><td>83.6</td>
</tr>
</table>
<br>
**Flickr30K-CN Retrieval**:
<table border="1" width="120%">
<tr align="center">
<th>Task</th><th colspan="6">Text-to-Image</th><th colspan="6">Image-to-Text</th>
</tr>
<tr align="center">
<th>Setup</th><th colspan="3">Zero-shot</th><th colspan="3">Finetune</th><th colspan="3">Zero-shot</th><th colspan="3">Finetune</th>
</tr>
<tr align="center">
<td>Metric</td><td>R@1</td><td>R@5</td><td>R@10</td><td>R@1</td><td>R@5</td><td>R@10</td><td>R@1</td><td>R@5</td><td>R@10</td><td>R@1</td><td>R@5</td><td>R@10</td>
</tr>
<tr align="center">
<td width="120%">Wukong</td><td>51.7</td><td>78.9</td><td>86.3</td><td>77.4</td><td>94.5</td><td>97.0</td><td>76.1</td><td>94.8</td><td>97.5</td><td>92.7</td><td>99.1</td><td>99.6</td>
</tr>
<tr align="center">
<td width="120%">R2D2</td><td>60.9</td><td>86.8</td><td>92.7</td><td>84.4</td><td>96.7</td><td>98.4</td><td>77.6</td><td>96.7</td><td>98.9</td><td>95.6</td><td>99.8</td><td>100.0</td>
</tr>
<tr align="center">
<td width="120%">CN-CLIP</td><td>71.2</td><td>91.4</td><td>95.5</td><td>83.8</td><td>96.9</td><td>98.6</td><td>81.6</td><td>97.5</td><td>98.8</td><td>95.3</td><td>99.7</td><td>100.0</td>
</tr>
</table>
<br>
**COCO-CN Retrieval**:
<table border="1" width="100%">
<tr align="center">
<th>Task</th><th colspan="6">Text-to-Image</th><th colspan="6">Image-to-Text</th>
</tr>
<tr align="center">
<th>Setup</th><th colspan="3">Zero-shot</th><th colspan="3">Finetune</th><th colspan="3">Zero-shot</th><th colspan="3">Finetune</th>
</tr>
<tr align="center">
<td>Metric</td><td>R@1</td><td>R@5</td><td>R@10</td><td>R@1</td><td>R@5</td><td>R@10</td><td>R@1</td><td>R@5</td><td>R@10</td><td>R@1</td><td>R@5</td><td>R@10</td>
</tr>
<tr align="center">
<td width="120%">Wukong</td><td>53.4</td><td>80.2</td><td>90.1</td><td>74.0</td><td>94.4</td><td>98.1</td><td>55.2</td><td>81.0</td><td>90.6</td><td>73.3</td><td>94.0</td><td>98.0</td>
</tr>
<tr align="center">
<td width="120%">R2D2</td><td>56.4</td><td>85.0</td><td>93.1</td><td>79.1</td><td>96.5</td><td>98.9</td><td>63.3</td><td>89.3</td><td>95.7</td><td>79.3</td><td>97.1</td><td>98.7</td>
</tr>
<tr align="center">
<td width="120%">CN-CLIP</td><td>69.2</td><td>89.9</td><td>96.1</td><td>81.5</td><td>96.9</td><td>99.1</td><td>63.0</td><td>86.6</td><td>92.9</td><td>83.5</td><td>97.3</td><td>99.2</td>
</tr>
</table>
<br>
**Zero-shot Image Classification**:
<table border="1" width="100%">
<tr align="center">
<th>Task</th><th>CIFAR10</th><th>CIFAR100</th><th>DTD</th><th>EuroSAT</th><th>FER</th><th>FGVC</th><th>KITTI</th><th>MNIST</th><th>PC</th><th>VOC</th>
</tr>
<tr align="center">
<td width="150%">GIT</td><td>88.5</td><td>61.1</td><td>42.9</td><td>43.4</td><td>41.4</td><td>6.7</td><td>22.1</td><td>68.9</td><td>50.0</td><td>80.2</td>
</tr>
<tr align="center">
<td width="150%">ALIGN</td><td>94.9</td><td>76.8</td><td>66.1</td><td>52.1</td><td>50.8</td><td>25.0</td><td>41.2</td><td>74.0</td><td>55.2</td><td>83.0</td>
</tr>
<tr align="center">
<td width="150%">CLIP</td><td>94.9</td><td>77.0</td><td>56.0</td><td>63.0</td><td>48.3</td><td>33.3</td><td>11.5</td><td>79.0</td><td>62.3</td><td>84.0</td>
</tr>
<tr align="center">
<td width="150%">Wukong</td><td>95.4</td><td>77.1</td><td>40.9</td><td>50.3</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td>
</tr>
<tr align="center">
<td width="150%">CN-CLIP</td><td>96.0</td><td>79.7</td><td>51.2</td><td>52.0</td><td>55.1</td><td>26.2</td><td>49.9</td><td>79.4</td><td>63.5</td><td>84.9</td>
</tr>
</table>
<br>
## Citation
If you find Chinese CLIP helpful, feel free to cite our paper. Thanks for your support!
```
@article{chinese-clip,
title={Chinese CLIP: Contrastive Vision-Language Pretraining in Chinese},
author={Yang, An and Pan, Junshu and Lin, Junyang and Men, Rui and Zhang, Yichang and Zhou, Jingren and Zhou, Chang},
journal={arXiv preprint arXiv:2211.01335},
year={2022}
}
```
<br> |
dccuchile/distilbert-base-spanish-uncased-finetuned-pos | [
"pytorch",
"distilbert",
"token-classification",
"transformers",
"autotrain_compatible"
]
| token-classification | {
"architectures": [
"DistilBertForTokenClassification"
],
"model_type": "distilbert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 3 | null | ---
tags:
- vision
widget:
- src: https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16/resolve/main/festival.jpg
candidate_labels: 灯笼, 鞭炮, 对联
example_title: festival
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/cat-dog-music.png
candidate_labels: 音乐表演, 体育运动
example_title: cat & dog
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/football-match.jpg
candidate_labels: 梅西, C罗, 马奎尔
example_title: football
---
# Chinese-CLIP-ViT-Large-Patch14-336px
## Introduction
This is the large version of Chinese CLIP with 336px input resolution, using ViT-L/14@336px as the image encoder and RoBERTa-wwm-base as the text encoder. Chinese CLIP is a simple implementation of CLIP on a large-scale dataset of around 200 million Chinese image-text pairs. For more details, please refer to our technical report https://arxiv.org/abs/2211.01335 and our official github repo https://github.com/OFA-Sys/Chinese-CLIP (Welcome to star! 🔥🔥)
## Use with the official API
We provide a simple code snippet to show how to use the API of Chinese-CLIP to compute the image & text embeddings and similarities.
```python
from PIL import Image
import requests
from transformers import ChineseCLIPProcessor, ChineseCLIPModel
model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-large-patch14-336px")
processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-large-patch14-336px")
url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg"
image = Image.open(requests.get(url, stream=True).raw)
# Squirtle, Bulbasaur, Charmander, Pikachu in English
texts = ["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"]
# compute image feature
inputs = processor(images=image, return_tensors="pt")
image_features = model.get_image_features(**inputs)
image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True) # normalize
# compute text features
inputs = processor(text=texts, padding=True, return_tensors="pt")
text_features = model.get_text_features(**inputs)
text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True) # normalize
# compute image-text similarity scores
inputs = processor(text=texts, images=image, return_tensors="pt", padding=True)
outputs = model(**inputs)
logits_per_image = outputs.logits_per_image # this is the image-text similarity score
probs = logits_per_image.softmax(dim=1) # probs: [[0.0219, 0.0316, 0.0043, 0.9423]]
```
However, if you are not satisfied with only using the API, feel free to check our github repo https://github.com/OFA-Sys/Chinese-CLIP for more details about training and inference.
<br><br>
## Results
**MUGE Text-to-Image Retrieval**:
<table border="1" width="100%">
<tr align="center">
<th>Setup</th><th colspan="4">Zero-shot</th><th colspan="4">Finetune</th>
</tr>
<tr align="center">
<td>Metric</td><td>R@1</td><td>R@5</td><td>R@10</td><td>MR</td><td>R@1</td><td>R@5</td><td>R@10</td><td>MR</td>
</tr>
<tr align="center">
<td width="120%">Wukong</td><td>42.7</td><td>69.0</td><td>78.0</td><td>63.2</td><td>52.7</td><td>77.9</td><td>85.6</td><td>72.1</td>
</tr>
<tr align="center">
<td width="120%">R2D2</td><td>49.5</td><td>75.7</td><td>83.2</td><td>69.5</td><td>60.1</td><td>82.9</td><td>89.4</td><td>77.5</td>
</tr>
<tr align="center">
<td width="120%">CN-CLIP</td><td>63.0</td><td>84.1</td><td>89.2</td><td>78.8</td><td>68.9</td><td>88.7</td><td>93.1</td><td>83.6</td>
</tr>
</table>
<br>
**Flickr30K-CN Retrieval**:
<table border="1" width="120%">
<tr align="center">
<th>Task</th><th colspan="6">Text-to-Image</th><th colspan="6">Image-to-Text</th>
</tr>
<tr align="center">
<th>Setup</th><th colspan="3">Zero-shot</th><th colspan="3">Finetune</th><th colspan="3">Zero-shot</th><th colspan="3">Finetune</th>
</tr>
<tr align="center">
<td>Metric</td><td>R@1</td><td>R@5</td><td>R@10</td><td>R@1</td><td>R@5</td><td>R@10</td><td>R@1</td><td>R@5</td><td>R@10</td><td>R@1</td><td>R@5</td><td>R@10</td>
</tr>
<tr align="center">
<td width="120%">Wukong</td><td>51.7</td><td>78.9</td><td>86.3</td><td>77.4</td><td>94.5</td><td>97.0</td><td>76.1</td><td>94.8</td><td>97.5</td><td>92.7</td><td>99.1</td><td>99.6</td>
</tr>
<tr align="center">
<td width="120%">R2D2</td><td>60.9</td><td>86.8</td><td>92.7</td><td>84.4</td><td>96.7</td><td>98.4</td><td>77.6</td><td>96.7</td><td>98.9</td><td>95.6</td><td>99.8</td><td>100.0</td>
</tr>
<tr align="center">
<td width="120%">CN-CLIP</td><td>71.2</td><td>91.4</td><td>95.5</td><td>83.8</td><td>96.9</td><td>98.6</td><td>81.6</td><td>97.5</td><td>98.8</td><td>95.3</td><td>99.7</td><td>100.0</td>
</tr>
</table>
<br>
**COCO-CN Retrieval**:
<table border="1" width="100%">
<tr align="center">
<th>Task</th><th colspan="6">Text-to-Image</th><th colspan="6">Image-to-Text</th>
</tr>
<tr align="center">
<th>Setup</th><th colspan="3">Zero-shot</th><th colspan="3">Finetune</th><th colspan="3">Zero-shot</th><th colspan="3">Finetune</th>
</tr>
<tr align="center">
<td>Metric</td><td>R@1</td><td>R@5</td><td>R@10</td><td>R@1</td><td>R@5</td><td>R@10</td><td>R@1</td><td>R@5</td><td>R@10</td><td>R@1</td><td>R@5</td><td>R@10</td>
</tr>
<tr align="center">
<td width="120%">Wukong</td><td>53.4</td><td>80.2</td><td>90.1</td><td>74.0</td><td>94.4</td><td>98.1</td><td>55.2</td><td>81.0</td><td>90.6</td><td>73.3</td><td>94.0</td><td>98.0</td>
</tr>
<tr align="center">
<td width="120%">R2D2</td><td>56.4</td><td>85.0</td><td>93.1</td><td>79.1</td><td>96.5</td><td>98.9</td><td>63.3</td><td>89.3</td><td>95.7</td><td>79.3</td><td>97.1</td><td>98.7</td>
</tr>
<tr align="center">
<td width="120%">CN-CLIP</td><td>69.2</td><td>89.9</td><td>96.1</td><td>81.5</td><td>96.9</td><td>99.1</td><td>63.0</td><td>86.6</td><td>92.9</td><td>83.5</td><td>97.3</td><td>99.2</td>
</tr>
</table>
<br>
**Zero-shot Image Classification**:
<table border="1" width="100%">
<tr align="center">
<th>Task</th><th>CIFAR10</th><th>CIFAR100</th><th>DTD</th><th>EuroSAT</th><th>FER</th><th>FGVC</th><th>KITTI</th><th>MNIST</th><th>PC</th><th>VOC</th>
</tr>
<tr align="center">
<td width="150%">GIT</td><td>88.5</td><td>61.1</td><td>42.9</td><td>43.4</td><td>41.4</td><td>6.7</td><td>22.1</td><td>68.9</td><td>50.0</td><td>80.2</td>
</tr>
<tr align="center">
<td width="150%">ALIGN</td><td>94.9</td><td>76.8</td><td>66.1</td><td>52.1</td><td>50.8</td><td>25.0</td><td>41.2</td><td>74.0</td><td>55.2</td><td>83.0</td>
</tr>
<tr align="center">
<td width="150%">CLIP</td><td>94.9</td><td>77.0</td><td>56.0</td><td>63.0</td><td>48.3</td><td>33.3</td><td>11.5</td><td>79.0</td><td>62.3</td><td>84.0</td>
</tr>
<tr align="center">
<td width="150%">Wukong</td><td>95.4</td><td>77.1</td><td>40.9</td><td>50.3</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td>
</tr>
<tr align="center">
<td width="150%">CN-CLIP</td><td>96.0</td><td>79.7</td><td>51.2</td><td>52.0</td><td>55.1</td><td>26.2</td><td>49.9</td><td>79.4</td><td>63.5</td><td>84.9</td>
</tr>
</table>
<br>
## Citation
If you find Chinese CLIP helpful, feel free to cite our paper. Thanks for your support!
```
@article{chinese-clip,
title={Chinese CLIP: Contrastive Vision-Language Pretraining in Chinese},
author={Yang, An and Pan, Junshu and Lin, Junyang and Men, Rui and Zhang, Yichang and Zhou, Jingren and Zhou, Chang},
journal={arXiv preprint arXiv:2211.01335},
year={2022}
}
```
<br> |
dccuchile/distilbert-base-spanish-uncased-finetuned-xnli | [
"pytorch",
"distilbert",
"text-classification",
"transformers"
]
| text-classification | {
"architectures": [
"DistilBertForSequenceClassification"
],
"model_type": "distilbert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 31 | 2022-11-09T09:45:11Z | ---
tags:
- vision
widget:
- src: https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16/resolve/main/festival.jpg
candidate_labels: 灯笼, 鞭炮, 对联
example_title: festival
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/cat-dog-music.png
candidate_labels: 音乐表演, 体育运动
example_title: cat & dog
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/football-match.jpg
candidate_labels: 梅西, C罗, 马奎尔
example_title: football
---
# Chinese-CLIP-ViT-Huge-Patch14
## Introduction
This is the huge version of Chinese CLIP, with ViT-H/14 as the image encoder and RoBERTa-wwm-large as the text encoder. Chinese CLIP is a simple implementation of CLIP on a large-scale dataset of around 200 million Chinese image-text pairs. For more details, please refer to our technical report https://arxiv.org/abs/2211.01335 and our official github repo https://github.com/OFA-Sys/Chinese-CLIP (Welcome to star! 🔥🔥)
## Use with the official API
We provide a simple code snippet to show how to use the API of Chinese-CLIP to compute the image & text embeddings and similarities.
```python
from PIL import Image
import requests
from transformers import ChineseCLIPProcessor, ChineseCLIPModel
model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-huge-patch14")
processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-huge-patch14")
url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg"
image = Image.open(requests.get(url, stream=True).raw)
# Squirtle, Bulbasaur, Charmander, Pikachu in English
texts = ["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"]
# compute image feature
inputs = processor(images=image, return_tensors="pt")
image_features = model.get_image_features(**inputs)
image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True) # normalize
# compute text features
inputs = processor(text=texts, padding=True, return_tensors="pt")
text_features = model.get_text_features(**inputs)
text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True) # normalize
# compute image-text similarity scores
inputs = processor(text=texts, images=image, return_tensors="pt", padding=True)
outputs = model(**inputs)
logits_per_image = outputs.logits_per_image # this is the image-text similarity score
probs = logits_per_image.softmax(dim=1) # probs: [[1.1419e-02, 1.0478e-02, 5.2018e-04, 9.7758e-01]]
```
However, if you are not satisfied with only using the API, feel free to check our github repo https://github.com/OFA-Sys/Chinese-CLIP for more details about training and inference.
<br><br>
## Results
**MUGE Text-to-Image Retrieval**:
<table border="1" width="100%">
<tr align="center">
<th>Setup</th><th colspan="4">Zero-shot</th><th colspan="4">Finetune</th>
</tr>
<tr align="center">
<td>Metric</td><td>R@1</td><td>R@5</td><td>R@10</td><td>MR</td><td>R@1</td><td>R@5</td><td>R@10</td><td>MR</td>
</tr>
<tr align="center">
<td width="120%">Wukong</td><td>42.7</td><td>69.0</td><td>78.0</td><td>63.2</td><td>52.7</td><td>77.9</td><td>85.6</td><td>72.1</td>
</tr>
<tr align="center">
<td width="120%">R2D2</td><td>49.5</td><td>75.7</td><td>83.2</td><td>69.5</td><td>60.1</td><td>82.9</td><td>89.4</td><td>77.5</td>
</tr>
<tr align="center">
<td width="120%">CN-CLIP</td><td>63.0</td><td>84.1</td><td>89.2</td><td>78.8</td><td>68.9</td><td>88.7</td><td>93.1</td><td>83.6</td>
</tr>
</table>
<br>
**Flickr30K-CN Retrieval**:
<table border="1" width="120%">
<tr align="center">
<th>Task</th><th colspan="6">Text-to-Image</th><th colspan="6">Image-to-Text</th>
</tr>
<tr align="center">
<th>Setup</th><th colspan="3">Zero-shot</th><th colspan="3">Finetune</th><th colspan="3">Zero-shot</th><th colspan="3">Finetune</th>
</tr>
<tr align="center">
<td>Metric</td><td>R@1</td><td>R@5</td><td>R@10</td><td>R@1</td><td>R@5</td><td>R@10</td><td>R@1</td><td>R@5</td><td>R@10</td><td>R@1</td><td>R@5</td><td>R@10</td>
</tr>
<tr align="center">
<td width="120%">Wukong</td><td>51.7</td><td>78.9</td><td>86.3</td><td>77.4</td><td>94.5</td><td>97.0</td><td>76.1</td><td>94.8</td><td>97.5</td><td>92.7</td><td>99.1</td><td>99.6</td>
</tr>
<tr align="center">
<td width="120%">R2D2</td><td>60.9</td><td>86.8</td><td>92.7</td><td>84.4</td><td>96.7</td><td>98.4</td><td>77.6</td><td>96.7</td><td>98.9</td><td>95.6</td><td>99.8</td><td>100.0</td>
</tr>
<tr align="center">
<td width="120%">CN-CLIP</td><td>71.2</td><td>91.4</td><td>95.5</td><td>83.8</td><td>96.9</td><td>98.6</td><td>81.6</td><td>97.5</td><td>98.8</td><td>95.3</td><td>99.7</td><td>100.0</td>
</tr>
</table>
<br>
**COCO-CN Retrieval**:
<table border="1" width="100%">
<tr align="center">
<th>Task</th><th colspan="6">Text-to-Image</th><th colspan="6">Image-to-Text</th>
</tr>
<tr align="center">
<th>Setup</th><th colspan="3">Zero-shot</th><th colspan="3">Finetune</th><th colspan="3">Zero-shot</th><th colspan="3">Finetune</th>
</tr>
<tr align="center">
<td>Metric</td><td>R@1</td><td>R@5</td><td>R@10</td><td>R@1</td><td>R@5</td><td>R@10</td><td>R@1</td><td>R@5</td><td>R@10</td><td>R@1</td><td>R@5</td><td>R@10</td>
</tr>
<tr align="center">
<td width="120%">Wukong</td><td>53.4</td><td>80.2</td><td>90.1</td><td>74.0</td><td>94.4</td><td>98.1</td><td>55.2</td><td>81.0</td><td>90.6</td><td>73.3</td><td>94.0</td><td>98.0</td>
</tr>
<tr align="center">
<td width="120%">R2D2</td><td>56.4</td><td>85.0</td><td>93.1</td><td>79.1</td><td>96.5</td><td>98.9</td><td>63.3</td><td>89.3</td><td>95.7</td><td>79.3</td><td>97.1</td><td>98.7</td>
</tr>
<tr align="center">
<td width="120%">CN-CLIP</td><td>69.2</td><td>89.9</td><td>96.1</td><td>81.5</td><td>96.9</td><td>99.1</td><td>63.0</td><td>86.6</td><td>92.9</td><td>83.5</td><td>97.3</td><td>99.2</td>
</tr>
</table>
<br>
**Zero-shot Image Classification**:
<table border="1" width="100%">
<tr align="center">
<th>Task</th><th>CIFAR10</th><th>CIFAR100</th><th>DTD</th><th>EuroSAT</th><th>FER</th><th>FGVC</th><th>KITTI</th><th>MNIST</th><th>PC</th><th>VOC</th>
</tr>
<tr align="center">
<td width="150%">GIT</td><td>88.5</td><td>61.1</td><td>42.9</td><td>43.4</td><td>41.4</td><td>6.7</td><td>22.1</td><td>68.9</td><td>50.0</td><td>80.2</td>
</tr>
<tr align="center">
<td width="150%">ALIGN</td><td>94.9</td><td>76.8</td><td>66.1</td><td>52.1</td><td>50.8</td><td>25.0</td><td>41.2</td><td>74.0</td><td>55.2</td><td>83.0</td>
</tr>
<tr align="center">
<td width="150%">CLIP</td><td>94.9</td><td>77.0</td><td>56.0</td><td>63.0</td><td>48.3</td><td>33.3</td><td>11.5</td><td>79.0</td><td>62.3</td><td>84.0</td>
</tr>
<tr align="center">
<td width="150%">Wukong</td><td>95.4</td><td>77.1</td><td>40.9</td><td>50.3</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td>
</tr>
<tr align="center">
<td width="150%">CN-CLIP</td><td>96.0</td><td>79.7</td><td>51.2</td><td>52.0</td><td>55.1</td><td>26.2</td><td>49.9</td><td>79.4</td><td>63.5</td><td>84.9</td>
</tr>
</table>
<br>
## Citation
If you find Chinese CLIP helpful, feel free to cite our paper. Thanks for your support!
```
@article{chinese-clip,
title={Chinese CLIP: Contrastive Vision-Language Pretraining in Chinese},
author={Yang, An and Pan, Junshu and Lin, Junyang and Men, Rui and Zhang, Yichang and Zhou, Jingren and Zhou, Chang},
journal={arXiv preprint arXiv:2211.01335},
year={2022}
}
```
<br> |
CennetOguz/distilbert-base-uncased-finetuned-recipe-1 | [
"pytorch",
"tensorboard",
"distilbert",
"fill-mask",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"DistilBertForMaskedLM"
],
"model_type": "distilbert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 7 | 2022-11-09T09:53:52Z | ---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- accuracy
model-index:
- name: CR_ELECTRA_5E
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# CR_ELECTRA_5E
This model is a fine-tuned version of [google/electra-base-discriminator](https://huggingface.co/google/electra-base-discriminator) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.3778
- Accuracy: 0.9133
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.643 | 0.33 | 50 | 0.6063 | 0.66 |
| 0.5093 | 0.66 | 100 | 0.4277 | 0.84 |
| 0.2986 | 0.99 | 150 | 0.3019 | 0.8933 |
| 0.2343 | 1.32 | 200 | 0.2910 | 0.9 |
| 0.1808 | 1.66 | 250 | 0.2892 | 0.9133 |
| 0.1922 | 1.99 | 300 | 0.3397 | 0.8867 |
| 0.1623 | 2.32 | 350 | 0.2847 | 0.92 |
| 0.1206 | 2.65 | 400 | 0.2918 | 0.9133 |
| 0.1518 | 2.98 | 450 | 0.3163 | 0.9067 |
| 0.1029 | 3.31 | 500 | 0.3667 | 0.8867 |
| 0.1133 | 3.64 | 550 | 0.3562 | 0.9067 |
| 0.0678 | 3.97 | 600 | 0.3394 | 0.9067 |
| 0.0461 | 4.3 | 650 | 0.3821 | 0.9067 |
| 0.0917 | 4.64 | 700 | 0.3774 | 0.9133 |
| 0.0633 | 4.97 | 750 | 0.3778 | 0.9133 |
### Framework versions
- Transformers 4.24.0
- Pytorch 1.13.0
- Datasets 2.3.2
- Tokenizers 0.13.1
|
CennetOguz/distilbert-base-uncased-finetuned-recipe-accelerate-1 | [
"pytorch",
"distilbert",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"DistilBertForMaskedLM"
],
"model_type": "distilbert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 1 | null | ---
tags:
- generated_from_trainer
- stable diffusion
- beautiful
- masterpiece
datasets:
- Gustavosta/Stable-Diffusion-Prompts
model-index:
- name: tiny-gpt2-magicprompt
results: []
widget:
- text: "morning sun over Jakarta"
example_title: "morning sun"
- text: "WARNING: pip is"
example_title: "pip"
- text: "sentient cheese"
example_title: "sentient cheese"
- text: "cheeps are"
example_title: "cheeps"
parameters:
min_length: 32
max_length: 64
no_repeat_ngram_size: 1
do_sample: True
---
# tiny-gpt2-magicprompt
~~Generate/augment your prompt, stable diffusion style.~~ Enter a new dimension of creativity
This model is a fine-tuned version of [sshleifer/tiny-gpt2](https://huggingface.co/sshleifer/tiny-gpt2) on the Gustavosta/Stable-Diffusion-Prompts dataset.
It achieves the following results on the evaluation set:
- Loss: 10.7918
- Perplexity: 48618.8756
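For reference, the perplexity is the exponential of the evaluation loss: exp(10.7918) ≈ 48,619.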
## Intended uses & limitations
???
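A minimal generation sketch is shown below. The base checkpoint id is used as a runnable stand-in (replace it with this fine-tuned model's Hub path), and the generation settings mirror the widget parameters declared in the metadata above.
```python
from transformers import pipeline

# stand-in repo id: swap in this fine-tuned checkpoint's actual Hub path
generator = pipeline("text-generation", model="sshleifer/tiny-gpt2")

result = generator(
    "morning sun over Jakarta",
    min_length=32,
    max_length=64,
    no_repeat_ngram_size=1,
    do_sample=True,
)
print(result[0]["generated_text"])
```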
## Training and evaluation data
Refer to the `Gustavosta/Stable-Diffusion-Prompts` dataset.
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 8
- eval_batch_size: 2
- seed: 42
- distributed_type: multi-GPU
- num_devices: 2
- gradient_accumulation_steps: 32
- total_train_batch_size: 512
- total_eval_batch_size: 4
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_ratio: 0.05
- num_epochs: 10.0
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 10.8201 | 0.96 | 16 | 10.8191 |
| 10.8167 | 1.96 | 32 | 10.8145 |
| 10.8117 | 2.96 | 48 | 10.8095 |
| 10.8058 | 3.96 | 64 | 10.8025 |
| 10.7997 | 4.96 | 80 | 10.7989 |
| 10.7959 | 5.96 | 96 | 10.7947 |
| 10.7934 | 6.96 | 112 | 10.7925 |
| 10.7924 | 7.96 | 128 | 10.7919 |
| 10.7921 | 8.96 | 144 | 10.7918 |
| 10.792 | 9.96 | 160 | 10.7918 |
### Framework versions
- Transformers 4.25.0.dev0
- Pytorch 1.13.0+cu117
- Datasets 2.6.1
- Tokenizers 0.13.1
|
Certified-Zoomer/DialoGPT-small-rick | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
tags:
- generated_from_trainer
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: toxicBERT-params
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# toxicBERT-params
This model is a fine-tuned version of [unitary/toxic-bert](https://huggingface.co/unitary/toxic-bert) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2938
- Precision: 0.0
- Recall: 0.0
- F1: 0.0
- Accuracy: 0.9174
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 7
### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:---:|:--------:|
| No log | 1.0 | 174 | 0.2364 | 0.0 | 0.0 | 0.0 | 0.9077 |
| No log | 2.0 | 348 | 0.2113 | 0.0 | 0.0 | 0.0 | 0.9190 |
| 0.2654 | 3.0 | 522 | 0.2195 | 0.0 | 0.0 | 0.0 | 0.9223 |
| 0.2654 | 4.0 | 696 | 0.2401 | 0.0 | 0.0 | 0.0 | 0.9211 |
| 0.2654 | 5.0 | 870 | 0.2679 | 0.0 | 0.0 | 0.0 | 0.9198 |
| 0.0844 | 6.0 | 1044 | 0.2930 | 0.0 | 0.0 | 0.0 | 0.9138 |
| 0.0844 | 7.0 | 1218 | 0.2938 | 0.0 | 0.0 | 0.0 | 0.9174 |
### Framework versions
- Transformers 4.24.0
- Pytorch 1.12.1+cu113
- Datasets 2.6.1
- Tokenizers 0.13.2
|
Chaddmckay/Cdm | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
license: apache-2.0
tags:
- generated_from_trainer
- stable diffusion
- diffusion
- text2image
- prompt augment
- prompt engineering
datasets:
- Gustavosta/Stable-Diffusion-Prompts
model-index:
- name: distilgpt2-magicprompt-SD
results: []
thumbnail: https://i.ibb.co/WkmTnZD/image.png
widget:
- text: "morning sun over Jakarta"
example_title: "morning sun"
- text: "WARNING: pip is"
example_title: "pip"
- text: "sentient cheese"
example_title: "sentient cheese"
- text: "cheeps are"
example_title: "cheeps"
- text: "avocado armchair"
example_title: "creative prompt"
- text: "Landscape of"
example_title: "landscape"
parameters:
min_length: 16
max_new_tokens: 24
no_repeat_ngram_size: 1
do_sample: True
---
# distilgpt2-magicprompt-SD
[](https://colab.research.google.com/gist/pszemraj/bdddf9c3fe92d1ac2654730016d64c80/demo-distilgpt2-magicprompt.ipynb)
Generate/augment your prompt, stable diffusion style.
This model is a fine-tuned version of [distilgpt2](https://huggingface.co/distilgpt2) on the Gustavosta/Stable-Diffusion-Prompts dataset.
It achieves the following results on the evaluation set:
- Loss: 1.3089
- eval_steps_per_second = 17.201
- perplexity = 3.7022
## example
Results in (_DALL-E, but you get the idea_):

<br>
this `distilgpt2` version is probably small/fast enough to be used locally on CPU!
## basic usage
install transformers as needed:
```bash
pip install -U transformers
```
load and query through a `pipeline` object:
```python
from transformers import pipeline
model_tag = "pszemraj/distilgpt2-magicprompt-SD"
generator = pipeline(
"text-generation",
model=model_tag,
)
prompt = "The Answer to Why"
result = generator(
prompt,
max_new_tokens=24,
) # generate, adjust/add kwargs as needed
print(result[0]["generated_text"])
```
## Training and evaluation data
refer to the `Gustavosta/Stable-Diffusion-Prompts` dataset.
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.001
- train_batch_size: 16
- eval_batch_size: 2
- seed: 42
- distributed_type: multi-GPU
- num_devices: 2
- gradient_accumulation_steps: 8
- total_train_batch_size: 256
- total_eval_batch_size: 4
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_ratio: 0.05
- num_epochs: 10.0
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 2.7061 | 0.99 | 33 | 2.5859 |
| 2.08 | 1.99 | 66 | 1.9965 |
| 1.7623 | 2.99 | 99 | 1.7248 |
| 1.5408 | 3.99 | 132 | 1.5449 |
| 1.4147 | 4.99 | 165 | 1.4437 |
| 1.3593 | 5.99 | 198 | 1.3768 |
| 1.2703 | 6.99 | 231 | 1.3362 |
| 1.2528 | 7.99 | 264 | 1.3175 |
| 1.1981 | 8.99 | 297 | 1.3091 |
| 1.2117 | 9.99 | 330 | 1.3089 |
### Framework versions
- Transformers 4.25.0.dev0
- Pytorch 1.13.0+cu117
- Datasets 2.6.1
- Tokenizers 0.13.1
|
Chaewon/mnmt_decoder_en | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
]
| text-generation | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 8 | null | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: distilbert-base-uncased-finetuned-squad
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-uncased-finetuned-squad
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the squad dataset.
It achieves the following results on the evaluation set:
- Loss: 1.2005
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 1.2454 | 1.0 | 5533 | 1.2005 |
### Framework versions
- Transformers 4.24.0
- Pytorch 1.12.1+cu113
- Datasets 2.6.1
- Tokenizers 0.13.2
|
Chaewon/mnmt_decoder_en_gpt2 | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: LunarLander-v2
type: LunarLander-v2
metrics:
- type: mean_reward
value: 229.17 +/- 21.29
name: mean_reward
verified: false
---
# **PPO** Agent playing **LunarLander-v2**
This is a trained model of a **PPO** agent playing **LunarLander-v2**
using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).
## Usage (with Stable-baselines3)
TODO: Add your code
```python
from stable_baselines3 import ...
from huggingface_sb3 import load_from_hub
...
```
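A minimal, hedged sketch of the usual loading pattern is below; the `repo_id` and `filename` are placeholders, since the card does not state them:
```python
import gym

from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

# Download the checkpoint from the Hub (repo_id/filename are hypothetical placeholders)
checkpoint = load_from_hub(repo_id="<user>/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)

# Evaluate the agent over a few episodes
env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10, deterministic=True)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```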
|
ChaitanyaU/FineTuneLM | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
license: apache-2.0
tags:
- generated_from_keras_callback
model-index:
- name: whisper_nosp_0005
results: []
---
<!-- This model card has been generated automatically according to the information Keras had access to. You should
probably proofread and complete it, then remove this comment. -->
# whisper_nosp_0005
This model is a fine-tuned version of [openai/whisper-tiny](https://huggingface.co/openai/whisper-tiny) on an unknown dataset.
It achieves the following results on the evaluation set:
- Train Loss: 1.9349
- Train Accuracy: 0.0157
- Validation Loss: 1.6630
- Validation Accuracy: 0.0172
- Epoch: 4
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- optimizer: {'name': 'AdamWeightDecay', 'learning_rate': 1e-05, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False, 'weight_decay_rate': 0.01}
- training_precision: float32
### Training results
| Train Loss | Train Accuracy | Validation Loss | Validation Accuracy | Epoch |
|:----------:|:--------------:|:---------------:|:-------------------:|:-----:|
| 7.5559 | 0.0010 | 6.3853 | 0.0013 | 0 |
| 6.3227 | 0.0021 | 5.7023 | 0.0038 | 1 |
| 4.9825 | 0.0063 | 3.6302 | 0.0109 | 2 |
| 2.9413 | 0.0126 | 2.1959 | 0.0154 | 3 |
| 1.9349 | 0.0157 | 1.6630 | 0.0172 | 4 |
### Framework versions
- Transformers 4.25.0.dev0
- TensorFlow 2.9.2
- Datasets 2.6.1
- Tokenizers 0.13.2
|
Chakita/Friends | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
]
| conversational | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": 1000
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 8 | null | ---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: wav2vec2-full-small_gpu_deneme4
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-full-small_gpu_deneme4
This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the None dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- num_epochs: 1
- mixed_precision_training: Native AMP
### Training results
### Framework versions
- Transformers 4.24.0
- Pytorch 1.12.1+cu113
- Datasets 2.6.1
- Tokenizers 0.13.2
|
Champion/test_upload_vox2_wavlm_epoch8 | [
"sidekit",
"audio"
]
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
license: apache-2.0
tags:
- generated_from_keras_callback
model-index:
- name: ai5_sum_model
results: []
---
<!-- This model card has been generated automatically according to the information Keras had access to. You should
probably proofread and complete it, then remove this comment. -->
# ai5_sum_model
This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on an unknown dataset.
It achieves the following results on the evaluation set:
- Train Loss: nan
- Validation Loss: nan
- Epoch: 3
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- optimizer: {'inner_optimizer': {'class_name': 'AdamWeightDecay', 'config': {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 0.05, 'decay_steps': 165000, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01}}, 'dynamic': True, 'initial_scale': 32768.0, 'dynamic_growth_steps': 2000}
- training_precision: mixed_float16
### Training results
| Train Loss | Validation Loss | Epoch |
|:----------:|:---------------:|:-----:|
| nan | nan | 0 |
| nan | nan | 1 |
| nan | nan | 2 |
| nan | nan | 3 |
### Framework versions
- Transformers 4.24.0
- TensorFlow 2.9.2
- Tokenizers 0.13.2
|
Chan/distilroberta-base-finetuned-wikitext2 | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
license: apache-2.0
tags:
- generated_from_keras_callback
model-index:
- name: whisper_nosp_0010
results: []
---
<!-- This model card has been generated automatically according to the information Keras had access to. You should
probably proofread and complete it, then remove this comment. -->
# whisper_nosp_0010
This model is a fine-tuned version of [openai/whisper-tiny](https://huggingface.co/openai/whisper-tiny) on an unknown dataset.
It achieves the following results on the evaluation set:
- Train Loss: 0.7431
- Train Accuracy: 0.0199
- Validation Loss: 0.9603
- Validation Accuracy: 0.0196
- Epoch: 9
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- optimizer: {'name': 'AdamWeightDecay', 'learning_rate': 1e-05, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False, 'weight_decay_rate': 0.01}
- training_precision: float32
### Training results
| Train Loss | Train Accuracy | Validation Loss | Validation Accuracy | Epoch |
|:----------:|:--------------:|:---------------:|:-------------------:|:-----:|
| 7.5559 | 0.0010 | 6.3853 | 0.0013 | 0 |
| 6.3227 | 0.0021 | 5.7023 | 0.0038 | 1 |
| 4.9825 | 0.0063 | 3.6302 | 0.0109 | 2 |
| 2.9413 | 0.0126 | 2.1959 | 0.0154 | 3 |
| 1.9349 | 0.0157 | 1.6630 | 0.0172 | 4 |
| 1.4741 | 0.0171 | 1.3813 | 0.0181 | 5 |
| 1.1975 | 0.0181 | 1.2161 | 0.0186 | 6 |
| 1.0048 | 0.0188 | 1.0990 | 0.0191 | 7 |
| 0.8598 | 0.0194 | 1.0165 | 0.0194 | 8 |
| 0.7431 | 0.0199 | 0.9603 | 0.0196 | 9 |
### Framework versions
- Transformers 4.25.0.dev0
- TensorFlow 2.9.2
- Datasets 2.6.1
- Tokenizers 0.13.2
|
Chandanbhat/distilbert-base-uncased-finetuned-cola | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
license: creativeml-openrail-m
tags:
- stable-diffusion
- text-to-image
---
Buy me a coffee if you like this project ;)
<a href="https://www.buymeacoffee.com/s3nh"><img src="https://www.buymeacoffee.com/assets/img/guidelines/download-assets-sm-1.svg" alt=""></a>
### Zelda: Breath of the Wild Artwork Diffusion Model
I present a fine-tuned version of stable-diffusion-v1-5, based heavily on
artworks from The Legend of Zelda: Breath of the Wild.
Use the tokens **_botw style_** in your prompts for the effect.
The model was trained using the diffusers library, building on its Dreambooth implementation.
Training steps included:
- prior preservation loss
- train-text-encoder fine tuning
### 🧨 Diffusers
This model can be used just like any other Stable Diffusion model. For more information,
please have a look at the [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion).
You can also export the model to [ONNX](https://huggingface.co/docs/diffusers/optimization/onnx), [MPS](https://huggingface.co/docs/diffusers/optimization/mps) and/or FLAX/JAX.
```python
#!pip install diffusers transformers scipy torch
from diffusers import StableDiffusionPipeline
import torch
model_id = "s3nh/s3nh/zelda-botw-stable-diffusion"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipe = pipe.to("cuda")
prompt = "Rain forest, botw style"
image = pipe(prompt).images[0]
image.save("./example_output.png")
```
# Gallery
## Grumpy cat, botw style
<img src = "https://huggingface.co/s3nh/zelda-botw-stable-diffusion/resolve/main/grumpy cat0.png">
<img src = "https://huggingface.co/s3nh/zelda-botw-stable-diffusion/resolve/main/grumpy cat1.png">
<img src = "https://huggingface.co/s3nh/zelda-botw-stable-diffusion/resolve/main/grumpy cat2.png">
<img src = "https://huggingface.co/s3nh/zelda-botw-stable-diffusion/resolve/main/grumpy cat3.png">
## Landscape, botw style




## License
This model is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage.
The CreativeML OpenRAIL License specifies:
1. You can't use the model to deliberately produce nor share illegal or harmful outputs or content
2. The authors claim no rights on the outputs you generate; you are free to use them and are accountable for their use, which must not go against the provisions set in the license
3. You may re-distribute the weights and use the model commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully)
[Please read the full license here](https://huggingface.co/spaces/CompVis/stable-diffusion-license)
|
CharlieChen/feedback-bigbird | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- matthews_correlation
model-index:
- name: distilbert-base-uncased-finetuned-cola
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: glue
type: glue
config: cola
split: train
args: cola
metrics:
- name: Matthews Correlation
type: matthews_correlation
value: 0.4467807407096838
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-uncased-finetuned-cola
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the glue dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4908
- Matthews Correlation: 0.4468
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
### Training results
| Training Loss | Epoch | Step | Validation Loss | Matthews Correlation |
|:-------------:|:-----:|:----:|:---------------:|:--------------------:|
| 0.5214 | 1.0 | 535 | 0.4908 | 0.4468 |
### Framework versions
- Transformers 4.24.0
- Pytorch 1.12.1+cu113
- Datasets 2.6.1
- Tokenizers 0.13.2
|
Cheatham/xlm-roberta-base-finetuned | [
"pytorch",
"xlm-roberta",
"text-classification",
"transformers"
]
| text-classification | {
"architectures": [
"XLMRobertaForSequenceClassification"
],
"model_type": "xlm-roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 20 | null | ---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers
---
# {MODEL_NAME}
This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search.
<!--- Describe your model here -->
## Usage (Sentence-Transformers)
Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:
```
pip install -U sentence-transformers
```
Then you can use the model like this:
```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]
model = SentenceTransformer('{MODEL_NAME}')
embeddings = model.encode(sentences)
print(embeddings)
```
## Usage (HuggingFace Transformers)
Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings.
```python
from transformers import AutoTokenizer, AutoModel
import torch
#Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
token_embeddings = model_output[0] #First element of model_output contains all token embeddings
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']
# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}')
model = AutoModel.from_pretrained('{MODEL_NAME}')
# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
# Compute token embeddings
with torch.no_grad():
model_output = model(**encoded_input)
# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
print("Sentence embeddings:")
print(sentence_embeddings)
```
## Evaluation Results
<!--- Describe how your model was evaluated -->
For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME})
## Training
The model was trained with the parameters:
**DataLoader**:
`torch.utils.data.dataloader.DataLoader` of length 2100 with parameters:
```
{'batch_size': 2, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```
**Loss**:
`sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss`
Parameters of the fit()-Method:
```
{
"epochs": 1,
"evaluation_steps": 0,
"evaluator": "NoneType",
"max_grad_norm": 1,
"optimizer_class": "<class 'torch.optim.adamw.AdamW'>",
"optimizer_params": {
"lr": 2e-05
},
"scheduler": "WarmupLinear",
"steps_per_epoch": 2100,
"warmup_steps": 210,
"weight_decay": 0.01
}
```
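As a rough guide, the parameters above correspond to a sentence-transformers training call along these lines (the training pairs shown are invented placeholders, not the actual training data):
```python
from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer, InputExample, losses

model = SentenceTransformer('{MODEL_NAME}')

# Placeholder pairs: the real (sentence, sentence, score) examples are not described in this card
train_examples = [InputExample(texts=["sentence A", "sentence B"], label=0.8)]
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=2)
train_loss = losses.CosineSimilarityLoss(model)

model.fit(
    train_objectives=[(train_dataloader, train_loss)],
    epochs=1,
    warmup_steps=210,
    optimizer_params={"lr": 2e-05},
    weight_decay=0.01,
    max_grad_norm=1,
)
```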
## Full Model Architecture
```
SentenceTransformer(
(0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: MPNetModel
(1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```
## Citing & Authors
<!--- Describe where people can find more information --> |
Cheatham/xlm-roberta-large-finetuned-d12 | [
"pytorch",
"xlm-roberta",
"text-classification",
"transformers"
]
| text-classification | {
"architectures": [
"XLMRobertaForSequenceClassification"
],
"model_type": "xlm-roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 20 | null | ---
tags:
- bert
license: cc-by-4.0
---
## bert-ascii-small
A small-size BERT Language Model pre-trained by predicting the summation of the **ASCII** code values of the characters in a masked token as a pre-training objective. For more details about the pre-training objective and the pre-training hyperparameters, please refer to [How does the pre-training objective affect what large language models learn about linguistic properties?](https://aclanthology.org/2022.acl-short.16/)
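As a rough illustration of the objective (not the authors' code), the pre-training target for a masked token can be derived from its characters like this:
```python
# Toy sketch: ASCII-sum target for a masked token (illustrative only, not the paper's implementation)
def ascii_sum_target(token: str) -> int:
    """Sum of the character code values of a token (ASCII codes for ASCII tokens)."""
    return sum(ord(ch) for ch in token)

print(ascii_sum_target("language"))  # 836 -- the value the model is trained to predict for this token
```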
## License
CC BY 4.0
## Citation
If you use this model, please cite the following paper:
```
@inproceedings{alajrami2022does,
title={How does the pre-training objective affect what large language models learn about linguistic properties?},
author={Alajrami, Ahmed and Aletras, Nikolaos},
booktitle={Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)},
pages={131--147},
year={2022}
}
``` |
Cheatham/xlm-roberta-large-finetuned-d12_2 | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
language: en
license: apache-2.0
library_name: diffusers
tags: []
datasets: huggan/smithsonian_butterflies_subset
metrics: []
---
<!-- This model card has been generated automatically according to the information the training script had access to. You
should probably proofread and complete it, then remove this comment. -->
# ddpm-butterflies-128
## Model description
This diffusion model is trained with the [🤗 Diffusers](https://github.com/huggingface/diffusers) library
on the `huggan/smithsonian_butterflies_subset` dataset.
## Intended uses & limitations
#### How to use
```python
# TODO: add an example code snippet for running this diffusion pipeline
```
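Until that snippet is added, a minimal sketch following the usual diffusers unconditional-generation pattern would look like this (the repository name is taken from the TensorBoard link below and may need adjusting):
```python
from diffusers import DDPMPipeline

# Repository name assumed from the TensorBoard link in this card
pipeline = DDPMPipeline.from_pretrained("alibidaran/ddpm-butterflies-128")

# Sample a single 128x128 butterfly image
image = pipeline().images[0]
image.save("butterfly.png")
```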
#### Limitations and bias
[TODO: provide examples of latent issues and potential remediations]
## Training data
[TODO: describe the data used to train the model]
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 16
- eval_batch_size: 16
- gradient_accumulation_steps: 1
- optimizer: AdamW with betas=(None, None), weight_decay=None and epsilon=None
- lr_scheduler: None
- lr_warmup_steps: 500
- ema_inv_gamma: None
- mixed_precision: fp16
### Training results
📈 [TensorBoard logs](https://huggingface.co/alibidaran/ddpm-butterflies-128/tensorboard?#scalars)
|
Cheatham/xlm-roberta-large-finetuned-r01 | [
"pytorch",
"xlm-roberta",
"text-classification",
"transformers"
]
| text-classification | {
"architectures": [
"XLMRobertaForSequenceClassification"
],
"model_type": "xlm-roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 23 | null | ---
language:
- el
license: apache-2.0
tags:
- hf-asr-leaderboard
- whisper-medium
- mozilla-foundation/common_voice_11_0
- greek
- whisper-event
- generated_from_trainer
datasets:
- mozilla-foundation/common_voice_11_0
- google/fleurs
metrics:
- wer
model-index:
- name: Whisper Medium El Greco
results:
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: mozilla-foundation/common_voice_11_0
type: mozilla-foundation/common_voice_11_0
config: el
split: test
metrics:
- name: Wer
type: wer
value: 10.7448
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# Whisper Medium El Greco
This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on the Common Voice 11.0 dataset.
It achieves the following results on the evaluation set:
- eval_loss: 0.4245
- eval_wer: 10.7448
- eval_runtime: 1107.1212
- eval_samples_per_second: 1.532
- eval_steps_per_second: 0.096
- epoch: 33.98
- step: 7000
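A minimal transcription sketch, assuming the standard transformers ASR pipeline (the repository path below is a placeholder, as the card does not state it):
```python
from transformers import pipeline

# Replace the hypothetical repository path with the actual model id
asr = pipeline(
    "automatic-speech-recognition",
    model="<username>/whisper-medium-el-greco",
    chunk_length_s=30,
)
print(asr("greek_sample.wav")["text"])
```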
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 32
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- training_steps: 7000
- mixed_precision_training: Native AMP
### Framework versions
- Transformers 4.26.0.dev0
- Pytorch 1.13.0+cu117
- Datasets 2.7.1.dev0
- Tokenizers 0.13.2
|
Cheatham/xlm-roberta-large-finetuned | [
"pytorch",
"xlm-roberta",
"text-classification",
"transformers"
]
| text-classification | {
"architectures": [
"XLMRobertaForSequenceClassification"
],
"model_type": "xlm-roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 20 | null | ---
license: mit
---
### Banana from [vibrant venture](https://store.steampowered.com/app/1264520), on [that thing](https://huggingface.co/hakurei/waifu-diffusion) via Dreambooth
#### model by no3
This model is Banana from [vibrant venture](https://store.steampowered.com/app/1264520) taught to [that thing](https://huggingface.co/hakurei/waifu-diffusion) with Dreambooth.
It can be used by modifying the `instance_prompt`: **sks ba**
You can also train your own concepts and upload them to the library by using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb).
And you can run your new concept via `diffusers`: [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb), [Spaces with the Public Concepts loaded](https://huggingface.co/spaces/sd-dreambooth-library/stable-diffusion-dreambooth-concepts)
### Note
This model is not **heavily tested**. If the output is not creative, more generic prompts are recommended; further testing is in progress, and there may be an update for this model.
If you have issues or questions feel free to visit the Community Tab and start discussion about it.
Here are the images used for training this concept:





[And the badly edited one](https://huggingface.co/no3/banana-wd-1.3-beta1/resolve/main/concept_images/6.jpg) |
Cheatham/xlm-roberta-large-finetuned4 | [
"pytorch",
"xlm-roberta",
"text-classification",
"transformers"
]
| text-classification | {
"architectures": [
"XLMRobertaForSequenceClassification"
],
"model_type": "xlm-roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 20 | null | ---
tags:
- bert
license: cc-by-4.0
---
## bert-fc-small
A small-size BERT Language Model with a **first character** prediction pre-training objective. For more details about the pre-training objective and the pre-training hyperparameters, please refer to [How does the pre-training objective affect what large language models learn about linguistic properties?](https://aclanthology.org/2022.acl-short.16/)
## License
CC BY 4.0
## Citation
If you use this model, please cite the following paper:
```
@inproceedings{alajrami2022does,
title={How does the pre-training objective affect what large language models learn about linguistic properties?},
author={Alajrami, Ahmed and Aletras, Nikolaos},
booktitle={Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)},
pages={131--147},
year={2022}
}
``` |
CheonggyeMountain-Sherpa/kogpt-trinity-poem | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
]
| text-generation | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 15 | null | ---
tags:
- bert
license: cc-by-4.0
---
## bert-mlm-small
A small-size BERT Language Model with an **MLM** pre-training objective. For more details about the pre-training objective and the pre-training hyperparameters, please refer to [How does the pre-training objective affect what large language models learn about linguistic properties?](https://aclanthology.org/2022.acl-short.16/)
## License
CC BY 4.0
## Citation
If you use this model, please cite the following paper:
```
@inproceedings{alajrami2022does,
title={How does the pre-training objective affect what large language models learn about linguistic properties?},
author={Alajrami, Ahmed and Aletras, Nikolaos},
booktitle={Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)},
pages={131--147},
year={2022}
}
``` |
Chertilasus/main | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
tags:
- bert
license: cc-by-4.0
---
## bert-rand-small
A small-size BERT Language Model with a **random** pre-training objective. For more details about the pre-training objective and the pre-training hyperparameters, please refer to [How does the pre-training objective affect what large language models learn about linguistic properties?](https://aclanthology.org/2022.acl-short.16/)
## License
CC BY 4.0
## Citation
If you use this model, please cite the following paper:
```
@inproceedings{alajrami2022does,
title={How does the pre-training objective affect what large language models learn about linguistic properties?},
author={Alajrami, Ahmed and Aletras, Nikolaos},
booktitle={Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)},
pages={131--147},
year={2022}
}
``` |
Chikita1/www_stash_stock | [
"license:bsd-3-clause-clear"
]
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
tags:
- bert
license: cc-by-4.0
---
## bert-sr-small
A small-size BERT Language Model with a **shuffle + random** pre-training objective. For more details about the pre-training objective and the pre-training hyperparameters, please refer to [How does the pre-training objective affect what large language models learn about linguistic properties?](https://aclanthology.org/2022.acl-short.16/)
## License
CC BY 4.0
## Citation
If you use this model, please cite the following paper:
```
@inproceedings{alajrami2022does,
title={How does the pre-training objective affect what large language models learn about linguistic properties?},
author={Alajrami, Ahmed and Aletras, Nikolaos},
booktitle={Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)},
pages={131--147},
year={2022}
}
``` |
Ching/negation_detector | [
"pytorch",
"roberta",
"question-answering",
"transformers",
"autotrain_compatible"
]
| question-answering | {
"architectures": [
"RobertaForQuestionAnswering"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 9 | null | ---
license: mit
tags:
- generated_from_trainer
model-index:
- name: pii_toxicity
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# pii_toxicity
This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on an unknown dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.1
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 8
- total_train_batch_size: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.01
- training_steps: 50354
- mixed_precision_training: Native AMP
### Framework versions
- Transformers 4.20.1
- Pytorch 1.11.0+cu113
- Datasets 2.5.1
- Tokenizers 0.11.6
|
Chinmay/mlindia | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
tags:
- autotrain
- tabular
- regression
- tabular-regression
datasets:
- Robertooo/autotrain-data-hmaet
co2_eq_emissions:
emissions: 0.04056452250649151
---
# Model Trained Using AutoTrain
- Problem type: Single Column Regression
- Model ID: 2037366889
- CO2 Emissions (in grams): 0.0406
## Validation Metrics
- Loss: 0.003
- R2: 0.999
- MSE: 0.000
- MAE: 0.001
- RMSLE: 0.002
## Usage
```python
import json
import joblib
import pandas as pd
model = joblib.load('model.joblib')
config = json.load(open('config.json'))
features = config['features']
data = pd.read_csv("data.csv")  # load the data you want to score
data = data[features]
data.columns = ["feat_" + str(col) for col in data.columns]
predictions = model.predict(data) # or model.predict_proba(data)
``` |
Chiuchiyin/DialoGPT-small-Donald | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
]
| conversational | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": 1000
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 7 | 2022-11-09T12:08:44Z | ---
tags:
- autotrain
- tabular
- regression
- tabular-regression
datasets:
- Robertooo/autotrain-data-hmaet
co2_eq_emissions:
emissions: 0.30327638531180195
---
# Model Trained Using AutoTrain
- Problem type: Single Column Regression
- Model ID: 2037366891
- CO2 Emissions (in grams): 0.3033
## Validation Metrics
- Loss: 0.067
- R2: 0.486
- MSE: 0.005
- MAE: 0.055
- RMSLE: 0.036
## Usage
```python
import json
import joblib
import pandas as pd
model = joblib.load('model.joblib')
config = json.load(open('config.json'))
features = config['features']
data = pd.read_csv("data.csv")  # load the data you want to score
data = data[features]
data.columns = ["feat_" + str(col) for col in data.columns]
predictions = model.predict(data) # or model.predict_proba(data)
``` |
ChoboAvenger/DialoGPT-small-joshua | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | 2022-11-09T12:25:34Z | ---
license: apache-2.0
tags:
- generated_from_keras_callback
model-index:
- name: DavidNo/albert-xxlarge-v2-finetuned-squadv2
results: []
---
<!-- This model card has been generated automatically according to the information Keras had access to. You should
probably proofread and complete it, then remove this comment. -->
# DavidNo/albert-xxlarge-v2-finetuned-squadv2
This model is a fine-tuned version of [albert-xxlarge-v2](https://huggingface.co/albert-xxlarge-v2) on an unknown dataset.
It achieves the following results on the evaluation set:
- Train Loss: 0.7633
- Train End Logits Accuracy: 0.6680
- Train Start Logits Accuracy: 0.6407
- Validation Loss: 1.1441
- Validation End Logits Accuracy: 0.5277
- Validation Start Logits Accuracy: 0.5106
- Epoch: 1
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- optimizer: {'name': 'Adam', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 16494, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False}
- training_precision: float32
### Training results
| Train Loss | Train End Logits Accuracy | Train Start Logits Accuracy | Validation Loss | Validation End Logits Accuracy | Validation Start Logits Accuracy | Epoch |
|:----------:|:-------------------------:|:---------------------------:|:---------------:|:------------------------------:|:--------------------------------:|:-----:|
| 1.0842 | 0.6032 | 0.5767 | 1.1372 | 0.5166 | 0.5058 | 0 |
| 0.7633 | 0.6680 | 0.6407 | 1.1441 | 0.5277 | 0.5106 | 1 |
### Framework versions
- Transformers 4.24.0
- TensorFlow 2.9.2
- Datasets 2.6.1
- Tokenizers 0.13.2
|
ChrisP/xlm-roberta-base-finetuned-marc-en | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
license: apache-2.0
tags:
- generated_from_keras_callback
model-index:
- name: gogzy/t5-base-finetuned_renre_2021_40
results: []
---
<!-- This model card has been generated automatically according to the information Keras had access to. You should
probably proofread and complete it, then remove this comment. -->
# gogzy/t5-base-finetuned_renre_2021_40
This model is a fine-tuned version of [t5-base](https://huggingface.co/t5-base) on an unknown dataset.
It achieves the following results on the evaluation set:
- Train Loss: 5.4875
- Validation Loss: 8.0355
- Train Rouge1: 7.0588
- Train Rouge2: 0.0
- Train Rougel: 4.7059
- Train Rougelsum: 4.7059
- Train Gen Len: 19.0
- Epoch: 4
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- optimizer: {'name': 'AdamWeightDecay', 'learning_rate': 2e-05, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False, 'weight_decay_rate': 0.01}
- training_precision: float32
### Training results
| Train Loss | Validation Loss | Train Rouge1 | Train Rouge2 | Train Rougel | Train Rougelsum | Train Gen Len | Epoch |
|:----------:|:---------------:|:------------:|:------------:|:------------:|:---------------:|:-------------:|:-----:|
| 5.2501 | 9.8474 | 19.5652 | 11.1111 | 13.0435 | 19.5652 | 19.0 | 0 |
| 7.5344 | 9.4134 | 7.0588 | 0.0 | 4.7059 | 4.7059 | 19.0 | 1 |
| 5.0059 | 8.9935 | 7.0588 | 0.0 | 4.7059 | 4.7059 | 19.0 | 2 |
| 5.3830 | 8.5525 | 7.0588 | 0.0 | 4.7059 | 4.7059 | 19.0 | 3 |
| 5.4875 | 8.0355 | 7.0588 | 0.0 | 4.7059 | 4.7059 | 19.0 | 4 |
### Framework versions
- Transformers 4.24.0
- TensorFlow 2.9.2
- Datasets 2.6.1
- Tokenizers 0.13.2
|
ChrisVCB/DialoGPT-medium-cmjs | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
]
| conversational | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": 1000
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 7 | 2022-11-09T12:30:08Z | ---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
---
# {MODEL_NAME}
This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search.
<!--- Describe your model here -->
## Usage (Sentence-Transformers)
Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:
```
pip install -U sentence-transformers
```
Then you can use the model like this:
```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]
model = SentenceTransformer('{MODEL_NAME}')
embeddings = model.encode(sentences)
print(embeddings)
```
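The resulting embeddings can then be scored with cosine similarity for clustering or semantic search, for example:
```python
from sentence_transformers import util

# Cosine similarity between the two example embeddings computed above
print(util.cos_sim(embeddings[0], embeddings[1]))
```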
## Evaluation Results
<!--- Describe how your model was evaluated -->
For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME})
## Training
The model was trained with the parameters:
**DataLoader**:
`torch.utils.data.dataloader.DataLoader` of length 1 with parameters:
```
{'batch_size': 64, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```
**Loss**:
`sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss`
Parameters of the fit()-Method:
```
{
"epochs": 1,
"evaluation_steps": 0,
"evaluator": "NoneType",
"max_grad_norm": 1,
"optimizer_class": "<class 'torch.optim.adamw.AdamW'>",
"optimizer_params": {
"lr": 2e-05
},
"scheduler": "WarmupLinear",
"steps_per_epoch": 1,
"warmup_steps": 1,
"weight_decay": 0.01
}
```
## Full Model Architecture
```
SentenceTransformer(
(0): Transformer({'max_seq_length': 384, 'do_lower_case': False}) with Transformer model: MPNetModel
(1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
(2): Normalize()
)
```
## Citing & Authors
<!--- Describe where people can find more information --> |
ChrisVCB/DialoGPT-medium-ej | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
]
| conversational | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": 1000
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 13 | null | ---
license: apache-2.0
tags:
- pytorch
- diffusers
- super-resolution
- diffusion-super-resolution
---
# Latent Diffusion Models (LDM) for super-resolution
**Paper**: [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752)
**Abstract**:
*By decomposing the image formation process into a sequential application of denoising autoencoders, diffusion models (DMs) achieve state-of-the-art synthesis results on image data and beyond. Additionally, their formulation allows for a guiding mechanism to control the image generation process without retraining. However, since these models typically operate directly in pixel space, optimization of powerful DMs often consumes hundreds of GPU days and inference is expensive due to sequential evaluations. To enable DM training on limited computational resources while retaining their quality and flexibility, we apply them in the latent space of powerful pretrained autoencoders. In contrast to previous work, training diffusion models on such a representation allows for the first time to reach a near-optimal point between complexity reduction and detail preservation, greatly boosting visual fidelity. By introducing cross-attention layers into the model architecture, we turn diffusion models into powerful and flexible generators for general conditioning inputs such as text or bounding boxes and high-resolution synthesis becomes possible in a convolutional manner. Our latent diffusion models (LDMs) achieve a new state of the art for image inpainting and highly competitive performance on various tasks, including unconditional image generation, semantic scene synthesis, and super-resolution, while significantly reducing computational requirements compared to pixel-based DMs.*
**Authors**
*Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, Björn Ommer*
## Usage
### Inference with a pipeline
```python
!pip install git+https://github.com/huggingface/diffusers.git
import requests
from PIL import Image
from io import BytesIO
from diffusers import LDMSuperResolutionPipeline
import torch
device = "cuda" if torch.cuda.is_available() else "cpu"
model_id = "CompVis/ldm-super-resolution-4x-openimages"
# load model and scheduler
pipeline = LDMSuperResolutionPipeline.from_pretrained(model_id)
pipeline = pipeline.to(device)
# let's download an image
url = "https://user-images.githubusercontent.com/38061659/199705896-b48e17b8-b231-47cd-a270-4ffa5a93fa3e.png"
response = requests.get(url)
low_res_img = Image.open(BytesIO(response.content)).convert("RGB")
low_res_img = low_res_img.resize((128, 128))
# run pipeline in inference (sample random noise and denoise)
upscaled_image = pipeline(low_res_img, num_inference_steps=100, eta=1).images[0]
# save image
upscaled_image.save("ldm_generated_image.png")
```
|
Chun/w-en2zh-hsk | [
"pytorch",
"marian",
"text2text-generation",
"transformers",
"autotrain_compatible"
]
| text2text-generation | {
"architectures": [
"MarianMTModel"
],
"model_type": "marian",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 1 | 2022-11-09T12:59:54Z | ---
library_name: sample-factory
tags:
- deep-reinforcement-learning
- reinforcement-learning
- sample-factory
model-index:
- name: APPO
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: dmlab_30
type: dmlab_30
metrics:
- type: mean_reward
value: 9.18 +/- 0.64
name: mean_reward
verified: false
---
An **APPO** model trained on the **dmlab_30** environment.
This model was trained using Sample Factory 2.0: https://github.com/alex-petrenko/sample-factory
|
Ci/Pai | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: LunarLander-v2
type: LunarLander-v2
metrics:
- type: mean_reward
value: -120.71 +/- 17.76
name: mean_reward
verified: false
---
# **PPO** Agent playing **LunarLander-v2**
This is a trained model of a **PPO** agent playing **LunarLander-v2**
using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).
## Usage (with Stable-baselines3)
A minimal loading sketch (the repository id and checkpoint filename are assumptions):
```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# The checkpoint filename follows the usual <algo>-<env>.zip convention; adjust as needed.
checkpoint = load_from_hub(repo_id="<user>/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)
```
|
Ciruzzo/DialoGPT-small-harrypotter | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
]
| conversational | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": 1000
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 9 | 2022-11-09T13:41:28Z | ---
tags:
- generated_from_trainer
model-index:
- name: GPT2-CLS-Finetuned-MBTI-GPT2-CLS-Finetuned-MBTI-JointGPT2-Warmup-from-CLS
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# GPT2-CLS-Finetuned-MBTI-GPT2-CLS-Finetuned-MBTI-JointGPT2-Warmup-from-CLS
This model is a fine-tuned version of [GItaf/GPT2-CLS-Finetuned-MBTI](https://huggingface.co/GItaf/GPT2-CLS-Finetuned-MBTI) on the None dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Framework versions
- Transformers 4.21.2
- Pytorch 1.12.1
- Datasets 2.4.0
- Tokenizers 0.12.1
|
Clarianliz30/Caitlyn | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
language: code
tags:
- code
- gpt2
- generation
datasets:
- giulio98/xlcost-single-prompt
widget:
- text: "'''\nfunction to add two numbers\n'''\n###\n"
example_title: "add two numbers"
model-index:
- name: codegen-350M-multi-xlcost
results:
- task:
name: Code Generation
type: code-generation
dataset:
name: "XLCost"
type: code_eval_outputs
metrics:
- name: pass@1
type: code_eval_outputs
value: 3.325
- name: pass@10
type: code_eval_outputs
value: 15
- name: codebleu
type: codebleu
value: 20.18191
---
# CodeGen-350M-multi-xlcost-v2
CodeGen-350M-multi-xlcost-v2 is a CodeGen model fine-tuned on the Python split of the XLCost dataset using DeepSpeed.
## Usage
You can load the CodeGen-350M-multi-xlcost-v2 model and tokenizer directly in `transformers`:
```Python
from transformers import AutoTokenizer, AutoModelForCausalLM
tokenizer = AutoTokenizer.from_pretrained("giulio98/codegen-350M-multi-xlcost-v2")
model = AutoModelForCausalLM.from_pretrained("giulio98/codegen-350M-multi-xlcost-v2")
text = tokenizer.eos_token + "\'\'\'\n" + "function to add two numbers" + "\n\'\'\'\n" + "###\n"
input_ids = tokenizer(text, return_tensors="pt").input_ids
generated_ids = model.generate(input_ids, max_length=128)
print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))
```
Output:
```Python
'''
function to add two numbers
'''
###
def add(a, b):
return a + b
```
## Training
The model was fine-tuned on [XLCost-single-prompt](https://huggingface.co/datasets/giulio98/xlcost-single-prompt), an improved version of the original XLCost dataset [xlcost-text-to-code](https://huggingface.co/datasets/codeparrot/xlcost-text-to-code). The hyperparameters are listed below.
| Hyperparameter | value |
|---------------------------|--------|
|Per device train batch size| 16 |
|Context size| 1024 |
|Training steps| 259|
|Gradient accumulation| 2|
|Gradient checkpointing| True|
|Learning rate|1.8e-05 |
|Weight decay | 0.1 |
|Warmup steps| 35 |
|Schedule| linear |
|zero stage| 2 |
The DeepSpeed configuration is shown below:
```Python
{
"fp16": {
"enabled": true,
"loss_scale": 0,
"loss_scale_window": 1000,
"initial_scale_power": 16,
"hysteresis": 2,
"min_loss_scale": 1
},
"optimizer": {
"type": "AdamW",
"params": {
"lr": 0.000018,
"betas": [
0.9,
0.999
],
"eps": 1e-8,
"weight_decay": 0.1
}
},
"scheduler": {
"type": "WarmupLR",
"params": {
"warmup_min_lr": 0,
"warmup_max_lr": 0.000018,
"warmup_num_steps": 35
}
},
"zero_optimization": {
"stage": 2,
"offload_optimizer": {
"device": "cpu",
"pin_memory": false
},
"allgather_partitions": true,
"allgather_bucket_size": 200000000,
"overlap_comm": true,
"reduce_scatter": true,
"reduce_bucket_size": 200000000,
"contiguous_gradients": true
},
"gradient_accumulation_steps": 2,
"train_batch_size": 32,
"train_micro_batch_size_per_gpu": 16,
"gradient_clipping": 1,
"wall_clock_breakdown": false
}
```
The training was executed on 1 x V100 (16GB) GPU for 28min 50sec
## Performance
We evaluated the model on the first 400 samples of XLCost's [XLCost-single-prompt test split](https://huggingface.co/datasets/giulio98/xlcost-single-prompt/viewer/Python/test), comparing the outputs of the generated code against the expected outputs using the pass@k metric.
| Metric | codegen-350M-multi-xlcost-v2 | codegen-350M-multi-xlcost | codegen-350M-mono (zero-shot) | codegen-350M-mono (one-shot) | codegen-350M-mono (few-shot) |
|--------|-----|-----|-----|-----|-----|
|pass@1 |3.325% |3.70% | 0.4% | 0.35% | 0.48% |
|pass@10 |15%| 14.5% | 3.5% | 3 % | 3.75% |
|CodeBLEU |20.18%| None | 15.15% | 19.42 % | 20.27% |
The [pass@k metric](https://huggingface.co/metrics/code_eval) tells the probability that at least one out of k generations passes the tests.
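For reference, the unbiased pass@k estimator used with this metric can be computed as follows (a sketch; `n` is the number of generations per task and `c` the number of correct ones):
```Python
import numpy as np

def pass_at_k(n: int, c: int, k: int) -> float:
    """Unbiased pass@k estimate: 1 - C(n-c, k) / C(n, k)."""
    if n - c < k:
        return 1.0
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

# Example: 20 generations per task, 3 of them pass, estimate pass@10
print(pass_at_k(n=20, c=3, k=10))
```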
## Citations
```
@article{Nijkamp2022ACP,
title={A Conversational Paradigm for Program Synthesis},
author={Nijkamp, Erik and Pang, Bo and Hayashi, Hiroaki and Tu, Lifu and Wang, Huan and Zhou, Yingbo and Savarese, Silvio and Xiong, Caiming},
journal={arXiv preprint},
year={2022}
}
``` |
ClaudeYang/awesome_fb_model | [
"pytorch",
"bart",
"text-classification",
"dataset:multi_nli",
"transformers",
"zero-shot-classification"
]
| zero-shot-classification | {
"architectures": [
"BartForSequenceClassification"
],
"model_type": "bart",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 26 | null | ---
license: openrail
library_name: diffusers
tags:
- TPU
- JAX
- Flax
- stable-diffusion
- text-to-image
language:
- en
datasets:
- camenduru/plushies
inference: false
---
Flax 🧨 checkpoints are here. The ckpt and 🧨 PyTorch checkpoints are here 🎉🎊: https://huggingface.co/camenduru/plushies-pt
<br/>
Trained with Google Cloud TPUs.
```
Runtime: 3h 26m 44s
Steps: 18000
Precision: bf16
Learning Rate: 1e-6
```
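A minimal text-to-image sketch using the PyTorch checkpoints linked above (the prompt and settings are assumptions):
```python
from diffusers import StableDiffusionPipeline
import torch

pipe = StableDiffusionPipeline.from_pretrained("camenduru/plushies-pt", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
image = pipe("a cute plushie of a red panda").images[0]
image.save("plushie.png")
```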
 |
CleveGreen/FieldClassifier | [
"pytorch",
"bert",
"text-classification",
"transformers"
]
| text-classification | {
"architectures": [
"BertForSequenceClassification"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 34 | null | ---
tags:
- autotrain
- text-classification
language:
- ca
widget:
- text: "Aquest dissabte, Francesc Solé va arribar a la meta a Ordino com el guanyador del Ultra Trail d'Andorra després de 170km amb un desnivell altitudinal de 13 500 metres, en un temps de 31 hores i 9 minuts."
- text: "Una cançó és una composició musical que conté, a vegades, una part amb veu o melodia vocal, és a dir, amb text, cantada, però també pot ser simplement un conjunt de notes tocades sistemàticament, formant un ritme."
datasets:
- projecte-aina/WikiCAT_ca
co2_eq_emissions:
emissions: 47.543878831739285
---
# Model Trained Using AutoTrain
- Problem type: Multi-class Classification
- Model ID: 2036166932
- CO2 Emissions (in grams): 47.5439
## Validation Metrics
- Loss: 0.701
- Accuracy: 0.787
- Macro F1: 0.776
- Micro F1: 0.787
- Weighted F1: 0.784
- Macro Precision: 0.786
- Micro Precision: 0.787
- Weighted Precision: 0.788
- Macro Recall: 0.775
- Micro Recall: 0.787
- Weighted Recall: 0.787
## Usage
You can use cURL to access this model:
```
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/crodri/autotrain-wikicat_ca-2036166932
```
Or Python API:
```
from transformers import AutoModelForSequenceClassification, AutoTokenizer
model = AutoModelForSequenceClassification.from_pretrained("crodri/wikicat_ca", use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained("crodri/wikicat_ca", use_auth_token=True)
inputs = tokenizer("Una cançó és una composició musical que conté, a vegades, una part amb veu o melodia vocal, és a dir, amb text, cantada, però també pot ser simplement un conjunt de notes tocades sistemàticament, formant un ritme.", return_tensors="pt")
outputs = model(**inputs)
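# Sketch: turn the logits into a predicted label (id2label comes from the fine-tuned config)
probs = outputs.logits.softmax(dim=-1)
print(model.config.id2label[int(probs.argmax(dim=-1))])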
``` |
CleveGreen/JobClassifier_v2_gpt | [
"pytorch",
"gpt2",
"text-classification",
"transformers"
]
| text-classification | {
"architectures": [
"GPT2ForSequenceClassification"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 27 | null | ---
tags:
- generated_from_trainer
model-index:
- name: PELM-JointGPT2
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# PELM-JointGPT2
This model is based on the PELM framework, initialised from [genGPT-2](https://huggingface.co/GItaf/GPT2-LM-Finetuned-MBTI) and then fine-tuned on the [MBTI dataset](https://www.kaggle.com/datasets/datasnaek/mbti-type).
It achieves the following results on the evaluation set:
- Loss: 4.3556
- Cls loss: 1.5778
- Lm loss: 3.9609
- Cls Accuracy: 0.6202
- Cls F1: 0.6126
- Cls Precision: 0.6216
- Cls Recall: 0.6202
- Perplexity: 52.50
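The reported perplexity is consistent with exponentiating the language-modeling loss:
```python
import math

print(math.exp(3.9609))  # ≈ 52.5, matching the reported perplexity
```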
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss | Cls loss | Lm loss | Cls Accuracy | Cls F1 | Cls Precision | Cls Recall | Perplexity |
|:-------------:|:-----:|:-----:|:---------------:|:--------:|:-------:|:------------:|:------:|:-------------:|:----------:|:----------:|
| 4.2735 | 1.0 | 3470 | 4.3562 | 1.5844 | 3.9598 | 0.5833 | 0.5708 | 0.5928 | 0.5833 | 52.45 |
| 4.0754 | 2.0 | 6940 | 4.3295 | 1.4806 | 3.9590 | 0.6196 | 0.6113 | 0.6332 | 0.6196 | 52.41 |
| 3.985 | 3.0 | 10410 | 4.3556 | 1.5778 | 3.9609 | 0.6202 | 0.6126 | 0.6216 | 0.6202 | 52.50 |
### Framework versions
- Transformers 4.21.2
- Pytorch 1.12.1
- Datasets 2.4.0
- Tokenizers 0.12.1 |
Clint/clinton | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
tags:
- LunarLander-v2
- ppo
- deep-reinforcement-learning
- reinforcement-learning
- custom-implementation
- deep-rl-class
model-index:
- name: PPO
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: LunarLander-v2
type: LunarLander-v2
metrics:
- type: mean_reward
value: -33.15 +/- 17.80
name: mean_reward
verified: false
---
# PPO Agent Playing LunarLander-v2
This is a trained model of a PPO agent playing LunarLander-v2.
To learn to code your own PPO agent and train it, see Unit 8 of the Deep Reinforcement Learning Class: https://github.com/huggingface/deep-rl-class/tree/main/unit8
# Hyperparameters
```python
{'exp_name': 'ppo'
'seed': 1
'torch_deterministic': True
'cuda': True
'track': False
'wandb_project_name': 'cleanRL'
'wandb_entity': None
'capture_video': False
'env_id': 'LunarLander-v2'
'total_timesteps': 250000
'learning_rate': 0.00025
'num_envs': 2
'num_steps': 128
'anneal_lr': True
'gae': True
'gamma': 0.99
'gae_lambda': 0.95
'num_minibatches': 4
'update_epochs': 4
'norm_adv': True
'clip_coef': 0.2
'clip_vloss': True
'ent_coef': 0.01
'vf_coef': 0.5
'max_grad_norm': 0.5
'target_kl': None
'repo_id': 'Terence3927/ppo-LunarLander-v2'
'batch_size': 256
'minibatch_size': 64}
```
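The batch sizes in the dictionary above follow from the other settings:
```python
num_envs, num_steps, num_minibatches = 2, 128, 4

batch_size = num_envs * num_steps                # 256
minibatch_size = batch_size // num_minibatches   # 64
print(batch_size, minibatch_size)
```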
|
CoachCarter/distilbert-base-uncased-finetuned-squad | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | 2022-11-09T15:02:01Z | ---
license: mit
tags:
- generated_from_trainer
metrics:
- accuracy
model-index:
- name: TSE_XLNET_5E
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# TSE_XLNET_5E
This model is a fine-tuned version of [xlnet-base-cased](https://huggingface.co/xlnet-base-cased) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4463
- Accuracy: 0.9333
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.6717 | 0.06 | 50 | 0.4377 | 0.8533 |
| 0.3989 | 0.12 | 100 | 0.4525 | 0.84 |
| 0.3433 | 0.17 | 150 | 0.3348 | 0.9133 |
| 0.2646 | 0.23 | 200 | 0.3722 | 0.9 |
| 0.3052 | 0.29 | 250 | 0.3306 | 0.8933 |
| 0.2583 | 0.35 | 300 | 0.3129 | 0.92 |
| 0.2712 | 0.4 | 350 | 0.3147 | 0.9 |
| 0.2708 | 0.46 | 400 | 0.2680 | 0.9 |
| 0.2443 | 0.52 | 450 | 0.2261 | 0.9133 |
| 0.2463 | 0.58 | 500 | 0.2583 | 0.9067 |
| 0.2525 | 0.63 | 550 | 0.2719 | 0.92 |
| 0.2522 | 0.69 | 600 | 0.3905 | 0.8933 |
| 0.2078 | 0.75 | 650 | 0.2674 | 0.9133 |
| 0.264 | 0.81 | 700 | 0.2774 | 0.9133 |
| 0.211 | 0.87 | 750 | 0.2652 | 0.9333 |
| 0.286 | 0.92 | 800 | 0.1777 | 0.94 |
| 0.2341 | 0.98 | 850 | 0.2570 | 0.9133 |
| 0.1797 | 1.04 | 900 | 0.3162 | 0.92 |
| 0.1831 | 1.1 | 950 | 0.3205 | 0.92 |
| 0.2006 | 1.15 | 1000 | 0.3173 | 0.9133 |
| 0.1555 | 1.21 | 1050 | 0.3388 | 0.9267 |
| 0.1712 | 1.27 | 1100 | 0.3968 | 0.92 |
| 0.1488 | 1.33 | 1150 | 0.4167 | 0.9133 |
| 0.1893 | 1.38 | 1200 | 0.3269 | 0.9267 |
| 0.1543 | 1.44 | 1250 | 0.3797 | 0.9133 |
| 0.1825 | 1.5 | 1300 | 0.2203 | 0.94 |
| 0.1841 | 1.56 | 1350 | 0.2744 | 0.9133 |
| 0.1523 | 1.61 | 1400 | 0.3561 | 0.9067 |
| 0.1914 | 1.67 | 1450 | 0.2859 | 0.9067 |
| 0.1742 | 1.73 | 1500 | 0.2461 | 0.9267 |
| 0.145 | 1.79 | 1550 | 0.4266 | 0.9133 |
| 0.208 | 1.85 | 1600 | 0.3470 | 0.9067 |
| 0.147 | 1.9 | 1650 | 0.4521 | 0.9133 |
| 0.1867 | 1.96 | 1700 | 0.3648 | 0.9067 |
| 0.182 | 2.02 | 1750 | 0.2659 | 0.9333 |
| 0.1079 | 2.08 | 1800 | 0.3393 | 0.92 |
| 0.1338 | 2.13 | 1850 | 0.3483 | 0.9267 |
| 0.1181 | 2.19 | 1900 | 0.4384 | 0.92 |
| 0.1418 | 2.25 | 1950 | 0.3468 | 0.9267 |
| 0.0953 | 2.31 | 2000 | 0.4008 | 0.9267 |
| 0.1313 | 2.36 | 2050 | 0.3301 | 0.9333 |
| 0.0499 | 2.42 | 2100 | 0.4018 | 0.92 |
| 0.1197 | 2.48 | 2150 | 0.3394 | 0.9267 |
| 0.1237 | 2.54 | 2200 | 0.3399 | 0.92 |
| 0.0766 | 2.6 | 2250 | 0.3947 | 0.9267 |
| 0.1142 | 2.65 | 2300 | 0.4055 | 0.9133 |
| 0.1362 | 2.71 | 2350 | 0.2599 | 0.9333 |
| 0.1332 | 2.77 | 2400 | 0.3293 | 0.9133 |
| 0.1241 | 2.83 | 2450 | 0.3717 | 0.92 |
| 0.0696 | 2.88 | 2500 | 0.4440 | 0.92 |
| 0.1012 | 2.94 | 2550 | 0.4026 | 0.92 |
| 0.1028 | 3.0 | 2600 | 0.4202 | 0.9133 |
| 0.0551 | 3.06 | 2650 | 0.4649 | 0.9133 |
| 0.0796 | 3.11 | 2700 | 0.4053 | 0.92 |
| 0.0786 | 3.17 | 2750 | 0.4862 | 0.9067 |
| 0.0843 | 3.23 | 2800 | 0.4007 | 0.9267 |
| 0.0502 | 3.29 | 2850 | 0.4510 | 0.92 |
| 0.0726 | 3.34 | 2900 | 0.4171 | 0.9267 |
| 0.0933 | 3.4 | 2950 | 0.3485 | 0.9333 |
| 0.0624 | 3.46 | 3000 | 0.4442 | 0.9133 |
| 0.0475 | 3.52 | 3050 | 0.4449 | 0.92 |
| 0.0498 | 3.58 | 3100 | 0.4147 | 0.9267 |
| 0.1101 | 3.63 | 3150 | 0.3484 | 0.9333 |
| 0.0785 | 3.69 | 3200 | 0.3630 | 0.9267 |
| 0.075 | 3.75 | 3250 | 0.4267 | 0.92 |
| 0.0709 | 3.81 | 3300 | 0.3638 | 0.9267 |
| 0.0754 | 3.86 | 3350 | 0.3890 | 0.9333 |
| 0.1038 | 3.92 | 3400 | 0.3910 | 0.9267 |
| 0.0274 | 3.98 | 3450 | 0.4246 | 0.9267 |
| 0.0723 | 4.04 | 3500 | 0.3847 | 0.9267 |
| 0.015 | 4.09 | 3550 | 0.4134 | 0.9333 |
| 0.0329 | 4.15 | 3600 | 0.4136 | 0.9333 |
| 0.0619 | 4.21 | 3650 | 0.4048 | 0.9333 |
| 0.0505 | 4.27 | 3700 | 0.4228 | 0.9267 |
| 0.0523 | 4.33 | 3750 | 0.4139 | 0.9267 |
| 0.0365 | 4.38 | 3800 | 0.4067 | 0.9267 |
| 0.0434 | 4.44 | 3850 | 0.4132 | 0.9333 |
| 0.0262 | 4.5 | 3900 | 0.4245 | 0.9333 |
| 0.0534 | 4.56 | 3950 | 0.4217 | 0.9333 |
| 0.0186 | 4.61 | 4000 | 0.4282 | 0.9333 |
| 0.0548 | 4.67 | 4050 | 0.4255 | 0.9333 |
| 0.0146 | 4.73 | 4100 | 0.4368 | 0.9333 |
| 0.0442 | 4.79 | 4150 | 0.4470 | 0.9333 |
| 0.0431 | 4.84 | 4200 | 0.4469 | 0.9333 |
| 0.0297 | 4.9 | 4250 | 0.4470 | 0.9333 |
| 0.0601 | 4.96 | 4300 | 0.4463 | 0.9333 |
### Framework versions
- Transformers 4.24.0
- Pytorch 1.13.0
- Datasets 2.3.2
- Tokenizers 0.13.1
|
CoachCarter/distilbert-base-uncased | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | 2022-11-09T15:20:14Z | ---
license: cc-by-nc-sa-4.0
---
This is a blended model containing approximately 40 Dreambooth finetunings, two of which are mine. Ideally it should be used with a complementary hypernetwork and the latest VAE for best results.
My style is somewhat blown-out looking and relies on a lot of artifacts; I tend to set the CFG scale and step count high.
|
CodeMonkey98/distilroberta-base-finetuned-wikitext2 | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- accuracy
- f1
model-index:
- name: distilbert-base-uncased_finetuned_Balance_Upsampling_SPEECH_TEXT_DISPLAY_v1
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-uncased_finetuned_Balance_Upsampling_SPEECH_TEXT_DISPLAY_v1
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 2.6982
- Accuracy: 0.7759
- F1: 0.7743
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 4
- eval_batch_size: 4
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 10
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |
|:-------------:|:-----:|:-----:|:---------------:|:--------:|:------:|
| 0.5321 | 1.0 | 7958 | 1.3225 | 0.7271 | 0.7391 |
| 0.2967 | 2.0 | 15916 | 1.3868 | 0.7574 | 0.7601 |
| 0.1821 | 3.0 | 23874 | 1.4753 | 0.7513 | 0.7515 |
| 0.1193 | 4.0 | 31832 | 1.7028 | 0.7588 | 0.7596 |
| 0.0722 | 5.0 | 39790 | 1.8155 | 0.7615 | 0.7599 |
| 0.041 | 6.0 | 47748 | 2.1622 | 0.7695 | 0.7678 |
| 0.0258 | 7.0 | 55706 | 2.3871 | 0.75 | 0.7462 |
| 0.0149 | 8.0 | 63664 | 2.6135 | 0.7571 | 0.7524 |
| 0.0076 | 9.0 | 71622 | 2.7974 | 0.7648 | 0.7617 |
| 0.0051 | 10.0 | 79580 | 2.6982 | 0.7759 | 0.7743 |
### Framework versions
- Transformers 4.22.2
- Pytorch 1.10.2
- Datasets 2.5.2
- Tokenizers 0.12.1
|
CodeNinja1126/bert-q-encoder | [
"pytorch"
]
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 3 | null | ---
library_name: stable-baselines3
tags:
- AntBulletEnv-v0
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: A2C
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: AntBulletEnv-v0
type: AntBulletEnv-v0
metrics:
- type: mean_reward
value: 376.30 +/- 46.89
name: mean_reward
verified: false
---
# **A2C** Agent playing **AntBulletEnv-v0**
This is a trained model of a **A2C** agent playing **AntBulletEnv-v0**
using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).
## Usage (with Stable-baselines3)
A minimal loading sketch (the repository id and checkpoint filename are assumptions):
```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import A2C

# The checkpoint filename follows the usual <algo>-<env>.zip convention; adjust as needed.
checkpoint = load_from_hub(repo_id="<user>/a2c-AntBulletEnv-v0", filename="a2c-AntBulletEnv-v0.zip")
model = A2C.load(checkpoint)
```
|
CodeNinja1126/koelectra-model | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
library_name: stable-baselines3
tags:
- AntBulletEnv-v0
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: A2C
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: AntBulletEnv-v0
type: AntBulletEnv-v0
metrics:
- type: mean_reward
value: 268.56 +/- 46.53
name: mean_reward
verified: false
---
# **A2C** Agent playing **AntBulletEnv-v0**
This is a trained model of a **A2C** agent playing **AntBulletEnv-v0**
using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).
## Usage (with Stable-baselines3)
A minimal loading sketch (the repository id and checkpoint filename are assumptions):
```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import A2C

# The checkpoint filename follows the usual <algo>-<env>.zip convention; adjust as needed.
checkpoint = load_from_hub(repo_id="<user>/a2c-AntBulletEnv-v0", filename="a2c-AntBulletEnv-v0.zip")
model = A2C.load(checkpoint)
```
|
CodeNinja1126/test-model | [
"pytorch",
"jax",
"bert",
"text-classification",
"transformers"
]
| text-classification | {
"architectures": [
"BertForSequenceClassification"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 24 | null | ---
license: mit
tags:
- generated_from_trainer
model-index:
- name: BERiT_2000
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# BERiT_2000
This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 6.7293
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 6.9294 | 0.19 | 500 | 6.8136 |
| 6.7692 | 0.39 | 1000 | 6.8006 |
| 6.7567 | 0.58 | 1500 | 6.7770 |
| 6.746 | 0.77 | 2000 | 6.7414 |
| 6.7577 | 0.97 | 2500 | 6.7333 |
| 6.7295 | 1.16 | 3000 | 6.7405 |
| 6.7635 | 1.36 | 3500 | 6.7272 |
| 6.7715 | 1.55 | 4000 | 6.7114 |
| 6.7348 | 1.74 | 4500 | 6.7275 |
| 6.719 | 1.94 | 5000 | 6.7322 |
| 6.7427 | 2.13 | 5500 | 6.7242 |
| 6.7136 | 2.32 | 6000 | 6.6852 |
| 6.719 | 2.52 | 6500 | 6.7430 |
| 6.7229 | 2.71 | 7000 | 6.7331 |
| 6.7166 | 2.9 | 7500 | 6.7293 |
### Framework versions
- Transformers 4.24.0
- Pytorch 1.12.1+cu113
- Datasets 2.6.1
- Tokenizers 0.13.2
|
CoderBoy432/DialoGPT-small-harrypotter | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
]
| conversational | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": 1000
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 11 | null | ---
license: mit
tags:
- generated_from_trainer
metrics:
- accuracy
model-index:
- name: TSE_roBERTa_5E
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# TSE_roBERTa_5E
This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2671
- Accuracy: 0.9533
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.68 | 0.06 | 50 | 0.5879 | 0.9133 |
| 0.3596 | 0.12 | 100 | 0.3471 | 0.9 |
| 0.3019 | 0.17 | 150 | 0.2314 | 0.9333 |
| 0.2724 | 0.23 | 200 | 0.1860 | 0.9533 |
| 0.2641 | 0.29 | 250 | 0.2271 | 0.94 |
| 0.2941 | 0.35 | 300 | 0.1763 | 0.9467 |
| 0.2494 | 0.4 | 350 | 0.2019 | 0.94 |
| 0.221 | 0.46 | 400 | 0.2450 | 0.9533 |
| 0.2456 | 0.52 | 450 | 0.2298 | 0.9467 |
| 0.1705 | 0.58 | 500 | 0.2139 | 0.9533 |
| 0.1973 | 0.63 | 550 | 0.2810 | 0.9333 |
| 0.2348 | 0.69 | 600 | 0.2539 | 0.94 |
| 0.2561 | 0.75 | 650 | 0.2330 | 0.9333 |
| 0.2166 | 0.81 | 700 | 0.2083 | 0.9467 |
| 0.205 | 0.87 | 750 | 0.2768 | 0.92 |
| 0.2182 | 0.92 | 800 | 0.2182 | 0.94 |
| 0.2009 | 0.98 | 850 | 0.2534 | 0.94 |
| 0.1388 | 1.04 | 900 | 0.3099 | 0.9267 |
| 0.1208 | 1.1 | 950 | 0.2770 | 0.9467 |
| 0.1795 | 1.15 | 1000 | 0.2078 | 0.9467 |
| 0.1443 | 1.21 | 1050 | 0.1965 | 0.96 |
| 0.1519 | 1.27 | 1100 | 0.1918 | 0.9533 |
| 0.1653 | 1.33 | 1150 | 0.1850 | 0.96 |
| 0.1689 | 1.38 | 1200 | 0.2261 | 0.9467 |
| 0.1802 | 1.44 | 1250 | 0.2246 | 0.96 |
| 0.1894 | 1.5 | 1300 | 0.2026 | 0.96 |
| 0.219 | 1.56 | 1350 | 0.1598 | 0.96 |
| 0.1608 | 1.61 | 1400 | 0.1571 | 0.96 |
| 0.1976 | 1.67 | 1450 | 0.1699 | 0.9533 |
| 0.1987 | 1.73 | 1500 | 0.2173 | 0.9533 |
| 0.1503 | 1.79 | 1550 | 0.2097 | 0.9533 |
| 0.1293 | 1.85 | 1600 | 0.2316 | 0.9533 |
| 0.2267 | 1.9 | 1650 | 0.1664 | 0.9533 |
| 0.1833 | 1.96 | 1700 | 0.1829 | 0.9533 |
| 0.1991 | 2.02 | 1750 | 0.1854 | 0.96 |
| 0.0965 | 2.08 | 1800 | 0.2719 | 0.94 |
| 0.1869 | 2.13 | 1850 | 0.1759 | 0.9667 |
| 0.154 | 2.19 | 1900 | 0.2418 | 0.9533 |
| 0.1093 | 2.25 | 1950 | 0.2517 | 0.9533 |
| 0.1829 | 2.31 | 2000 | 0.2011 | 0.9667 |
| 0.1331 | 2.36 | 2050 | 0.2125 | 0.9667 |
| 0.1211 | 2.42 | 2100 | 0.2759 | 0.9533 |
| 0.1523 | 2.48 | 2150 | 0.2093 | 0.9533 |
| 0.1224 | 2.54 | 2200 | 0.2132 | 0.96 |
| 0.1205 | 2.6 | 2250 | 0.2117 | 0.96 |
| 0.1068 | 2.65 | 2300 | 0.2024 | 0.9667 |
| 0.1563 | 2.71 | 2350 | 0.1979 | 0.9533 |
| 0.1064 | 2.77 | 2400 | 0.2397 | 0.9533 |
| 0.1393 | 2.83 | 2450 | 0.2133 | 0.9533 |
| 0.0999 | 2.88 | 2500 | 0.2248 | 0.9533 |
| 0.1383 | 2.94 | 2550 | 0.2273 | 0.9467 |
| 0.1315 | 3.0 | 2600 | 0.2289 | 0.9467 |
| 0.095 | 3.06 | 2650 | 0.2668 | 0.9467 |
| 0.1249 | 3.11 | 2700 | 0.2345 | 0.96 |
| 0.0653 | 3.17 | 2750 | 0.2188 | 0.96 |
| 0.1102 | 3.23 | 2800 | 0.2601 | 0.9533 |
| 0.1118 | 3.29 | 2850 | 0.2241 | 0.9667 |
| 0.0746 | 3.34 | 2900 | 0.2306 | 0.96 |
| 0.0875 | 3.4 | 2950 | 0.2906 | 0.9467 |
| 0.0943 | 3.46 | 3000 | 0.2528 | 0.96 |
| 0.1253 | 3.52 | 3050 | 0.2503 | 0.9533 |
| 0.0971 | 3.58 | 3100 | 0.2182 | 0.96 |
| 0.0919 | 3.63 | 3150 | 0.2224 | 0.96 |
| 0.1053 | 3.69 | 3200 | 0.2114 | 0.9667 |
| 0.1041 | 3.75 | 3250 | 0.2055 | 0.9667 |
| 0.0836 | 3.81 | 3300 | 0.2196 | 0.96 |
| 0.0873 | 3.86 | 3350 | 0.2129 | 0.96 |
| 0.0725 | 3.92 | 3400 | 0.2352 | 0.9533 |
| 0.1187 | 3.98 | 3450 | 0.2114 | 0.96 |
| 0.108 | 4.04 | 3500 | 0.2233 | 0.96 |
| 0.0725 | 4.09 | 3550 | 0.2538 | 0.9533 |
| 0.0856 | 4.15 | 3600 | 0.2433 | 0.9533 |
| 0.0921 | 4.21 | 3650 | 0.2316 | 0.9533 |
| 0.0561 | 4.27 | 3700 | 0.2548 | 0.9533 |
| 0.0774 | 4.33 | 3750 | 0.2247 | 0.96 |
| 0.0508 | 4.38 | 3800 | 0.2389 | 0.96 |
| 0.1014 | 4.44 | 3850 | 0.2755 | 0.9533 |
| 0.0598 | 4.5 | 3900 | 0.2750 | 0.9533 |
| 0.0796 | 4.56 | 3950 | 0.2697 | 0.9533 |
| 0.0718 | 4.61 | 4000 | 0.2648 | 0.9533 |
| 0.0566 | 4.67 | 4050 | 0.2620 | 0.9533 |
| 0.0704 | 4.73 | 4100 | 0.2516 | 0.9533 |
| 0.0582 | 4.79 | 4150 | 0.2653 | 0.9533 |
| 0.1066 | 4.84 | 4200 | 0.2722 | 0.9467 |
| 0.0782 | 4.9 | 4250 | 0.2698 | 0.9533 |
| 0.0318 | 4.96 | 4300 | 0.2671 | 0.9533 |
### Framework versions
- Transformers 4.24.0
- Pytorch 1.13.0
- Datasets 2.3.2
- Tokenizers 0.13.1
|
CoderEFE/DialoGPT-marxbot | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational",
"has_space"
]
| conversational | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": 1000
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 11 | 2022-11-09T15:58:46Z | ---
tags:
- conversational
---
# Harry Potter Bot AI |
CoderEFE/DialoGPT-medium-marx | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
]
| text-generation | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": 1000
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 7 | 2022-11-09T16:19:50Z | ---
license: gpl-3.0
---
# Breast Estrogen Receptor v1 Model Card
This model card describes a model associated with a manuscript currently under peer review. Further details regarding the manuscript and an Arxiv link will be included shortly.
## Model Details
- **Developed by:** James Dolezal
- **Model type:** Deep convolutional neural network image classifier
- **Language(s):** English
- **License:** GPL-3.0
- **Model Description:** This is a model that can predict, from H&E-stained pathologic images of breast cancer, whether a tumor is likely to be estrogen receptor (ER) negative or positive. It is an [Xception](https://arxiv.org/abs/1610.02357) model with two dropout-enabled hidden layers.
- **Image processing:** This model expects images of H&E-stained pathology slides at 299 x 299 px and 302 x 302 μm resolution. Images should be stain-normalized using a modified Reinhard normalizer ("Reinhard-Fast") available [here](https://github.com/jamesdolezal/slideflow/blob/master/slideflow/norm/tensorflow/reinhard.py). The stain normalizer should be fit using the `target_means` and `target_stds` listed in the model `params.json` file. Images should be standardized with `tf.image.per_image_standardization()`.
- **Resources for more information:** [GitHub Repository](https://github.com/jamesdolezal/histologic-sheep)
# Uses
## Examples
For direct use, the model can be loaded using Tensorflow/Keras:
```
import tensorflow as tf
model = tf.keras.models.load_model('/path/')
```
or loaded with [Slideflow](https://github.com/jamesdolezal/slideflow) version 1.1+ with the following syntax:
```
import slideflow as sf
model = sf.model.load('/path/')
```
The stain normalizer can be loaded and fit using Slideflow:
```
normalizer = sf.util.get_model_normalizer('/path/')
```
The stain normalizer has a native Tensorflow transform and can be directly applied to a tf.data.Dataset:
```
# Map the stain normalizer transformation
# to a tf.data.Dataset
dataset = dataset.map(normalizer.tf_to_tf)
```
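Per the image-processing notes above, the expected per-image standardization can be mapped onto the dataset in the same way (a sketch that assumes the dataset yields bare image tensors):
```
# Apply per-image standardization after stain normalization
import tensorflow as tf
dataset = dataset.map(tf.image.per_image_standardization)
```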
Alternatively, the model can be used to generate predictions for whole-slide images processed through Slideflow in an end-to-end [Project](https://slideflow.dev/project_setup.html). To use the model to generate predictions on data processed with Slideflow, simply pass the model to the [`Project.predict()`](https://slideflow.dev/project.html#slideflow.Project.predict) function:
```
import slideflow as sf
P = sf.Project('/path/to/slideflow/project')
P.predict('/model/path')
```
## Direct Use
This model is intended for research purposes only. Possible research areas and tasks include
- Applications in educational settings.
- Research on pathology classification models for breast cancer.
Excluded uses are described below.
### Misuse and Out-of-Scope Use
This model should not be used in a clinical setting to generate predictions that will be used to inform patients, physicians, or any other health care members directly involved in their health care outside the context of an approved research protocol. Using the model in a clinical setting outside the context of an approved research protocol is a misuse of this model. This includes, but is not limited to:
- Generating predictions of images from a patient's tumor and sharing those predictions with the patient
- Generating predictions of images from a patient's tumor and sharing those predictions with the patient's physician, or other members of the patient's healthcare team
- Influencing a patient's health care treatment in any way based on output from this model
### Limitations
The model has not been validated to discriminate estrogen receptor status in a manner that controls for possible underlying biological bias, such as tumor grade or histological subtype.
### Bias
This model was trained on The Cancer Genome Atlas (TCGA), which contains patient data from communities and cultures that may not reflect the general population. This dataset comprises images from multiple institutions, which may introduce a potential source of bias from site-specific batch effects ([Howard, 2021](https://www.nature.com/articles/s41467-021-24698-1)).
## Training
**Training Data**
The following dataset was used to train the model:
- The Cancer Genome Atlas (TCGA), BRCA cohort (see next section)
This model was trained on a total of 1,048 slides, with 228 ER-negative tumors and 820 ER-positive tumors.
**Training Procedure**
Each whole-slide image was sectioned in a grid-wise fashion to extract tiles at 302 x 302 μm. Image tiles were extracted at the nearest downsample layer and resized to 299 x 299 px using [Libvips](https://www.libvips.org/API/current/libvips-resample.html#vips-resize). During training (a minimal augmentation sketch follows this list),
- Images are stain-normalized with a modified Reinhard normalizer ("Reinhard-Fast"), which excludes the brightness standardization step, available [here](https://github.com/jamesdolezal/slideflow/blob/master/slideflow/norm/tensorflow/reinhard.py)
- Images are randomly flipped and rotated (90, 180, 270)
- Images have a 50% chance of being JPEG compressed with quality level between 50-100%
- Images have a 10% chance of random Gaussian blur, with sigma between 0.5-2.0
- Images are standardized with `tf.image.per_image_standardization()`
- Images are classified through an Xception block, followed by two hidden layers with dropout (p=0.1) enabled during training
- The loss is cross-entropy, with ER-negative=0 and ER-positive=1
- Training is completed after 1 epoch
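A minimal tf.data augmentation sketch consistent with the steps above (this is illustrative, not the exact training code; the random Gaussian blur step is omitted because TF has no single built-in op for it):
```
import tensorflow as tf

def augment(image):
    # Random flip and 0/90/180/270-degree rotation
    image = tf.image.random_flip_left_right(image)
    image = tf.image.rot90(image, k=tf.random.uniform([], 0, 4, dtype=tf.int32))
    # JPEG compression at quality 50-100 (applied with 50% probability in the described pipeline)
    image = tf.image.random_jpeg_quality(image, 50, 100)
    # Final standardization, as in the last step above
    return tf.image.per_image_standardization(image)

dataset = dataset.map(augment)
```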
Additional training information:
- **Hardware:** 1 x A100 GPUs
- **Optimizer:** Adam
- **Batch:** 128
- **Learning rate:** 0.0001, with a decay of 0.98 every 512 steps
- **Hidden layers:** 2 hidden layers of width 1024, with dropout p=0.1
## Evaluation Results
External evaluation results are currently under peer review and will be posted once publicly available. |
CoffeeAddict93/gpt1-call-of-the-wild | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
]
| text-generation | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 8 | null | ---
license: mit
---
### Fast_DreamBooth_AMLO on Stable Diffusion via Dreambooth trained on the [fast-DreamBooth.ipynb by TheLastBen](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook
#### model by mrcrois
This is the Stable Diffusion model fine-tuned on the Fast_DreamBooth_AMLO concept taught to Stable Diffusion with Dreambooth.
It can be used by modifying the `instance_prompt(s)`: **AMLO17.jpg, AMLO21.jpg, AMLO9.jpg, AMLO18.jpg, AMLO2.jpg, AMLO1.jpg, AMLO13.jpg, AMLO15.jpg, AMLO14.jpg, AMLO22.jpg, AMLO4.jpg, AMLO16.jpg, AMLO11.jpg, AMLO7.jpg, AMLO8.jpg, AMLO19.jpg, AMLO10.jpg, AMLO6.jpg, AMLO20.jpg, AMLO12.jpg, AMLO5.jpg**
You can also train your own concepts and upload them to the library by using [the fast-DreamBooth.ipynb by TheLastBen](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb).
And you can run your new concept via `diffusers`: [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb), [Spaces with the Public Concepts loaded](https://huggingface.co/spaces/sd-dreambooth-library/stable-diffusion-dreambooth-concepts)
Here are the images used for training this concept:
AMLO5.jpg
AMLO12.jpg
AMLO20.jpg
AMLO6.jpg
AMLO10.jpg
AMLO19.jpg
AMLO8.jpg
AMLO7.jpg
AMLO11.jpg
AMLO16.jpg
AMLO4.jpg
AMLO22.jpg
AMLO14.jpg
AMLO15.jpg
AMLO13.jpg
AMLO1.jpg
AMLO2.jpg
AMLO18.jpg
AMLO9.jpg
AMLO21.jpg
AMLO17.jpg





















|
CoffeeAddict93/gpt1-modest-proposal | [
"pytorch",
"openai-gpt",
"text-generation",
"transformers",
"has_space"
]
| text-generation | {
"architectures": [
"OpenAIGPTLMHeadModel"
],
"model_type": "openai-gpt",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 11 | null | ---
language:
- hi
license: apache-2.0
tags:
- hf-asr-leaderboard
- generated_from_trainer
datasets:
- mozilla-foundation/common_voice_11_0
metrics:
- wer
model-index:
- name: Whisper Small Fr - Joss
results:
- task:
name: Automatic Speech Recognition
type: automatic-speech-recognition
dataset:
name: Common Voice 11.0 FR
type: mozilla-foundation/common_voice_11_0
args: 'config: fr, split: test'
metrics:
- name: Wer
type: wer
value: 24.03653329331678
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# Whisper Small Fr - Joss
This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the Common Voice 11.0 FR dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4212
- Wer: 24.0365
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- training_steps: 4000
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:-------:|
| 0.3803 | 0.99 | 1000 | 0.3992 | 23.9465 |
| 0.2214 | 1.99 | 2000 | 0.3902 | 22.8108 |
| 0.0986 | 2.98 | 3000 | 0.4028 | 22.4459 |
| 0.0478 | 3.98 | 4000 | 0.4212 | 24.0365 |
### Framework versions
- Transformers 4.25.0.dev0
- Pytorch 1.12.1+cu113
- Datasets 2.6.1
- Tokenizers 0.13.2
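A minimal transcription sketch with the `transformers` pipeline (the repository id is a placeholder for wherever this checkpoint is hosted):
```python
from transformers import pipeline

# Placeholder repo id; point it at this fine-tuned checkpoint
asr = pipeline("automatic-speech-recognition", model="<user>/whisper-small-fr")
print(asr("audio_sample.mp3"))
```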
|
CoffeeAddict93/gpt2-medium-call-of-the-wild | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
]
| text-generation | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 14 | null | ---
license: gpl-3.0
---
# Thyroid BRAF-RAS Score (BRS) v1 Model Card
This model card describes a model associated with the manuscript "Deep learning prediction of BRAF-RAS gene expression signature identifies noninvasive follicular thyroid neoplasms with papillary-like nuclear features", by Dolezal _et al_, available [here](https://www.nature.com/articles/s41379-020-00724-3)
## Model Details
- **Developed by:** James Dolezal
- **Model type:** Deep convolutional neural network image classifier
- **Language(s):** English
- **License:** GPL-3.0
- **Model Description:** This is a model that predicts, from H&E-stained pathologic images of thyroid neoplasms, the BRAF-RAS Score (BRS). BRS is a gene expression score scaled from -1 (BRAF-like) to +1 (RAS-like) indicating how similar a tumor's gene expression is to that of a BRAF-mutant or RAS-mutant tumor. The model is an [Xception](https://arxiv.org/abs/1610.02357) model with two dropout-enabled hidden layers.
- **Image processing:** This model expects images of H&E-stained pathology slides at 299 x 299 px and 302 x 302 μm resolution. Images should be stain-normalized using a modified Reinhard normalizer ("Reinhard-Fast") available [here](https://github.com/jamesdolezal/slideflow/blob/master/slideflow/norm/tensorflow/reinhard.py). The stain normalizer should be fit using the `target_means` and `target_stds` listed in the model `params.json` file. Images should be standardized with `tf.image.per_image_standardization()`.
- **Resources for more information:** [GitHub Repository](https://github.com/jamesdolezal/histologic-sheep)
# Uses
## Examples
For direct use, the model can be loaded using Tensorflow/Keras:
```
import tensorflow as tf
model = tf.keras.models.load_model('/path/')
```
or loaded with [Slideflow](https://github.com/jamesdolezal/slideflow) version 1.1+ with the following syntax:
```
import slideflow as sf
model = sf.model.load('/path/')
```
The stain normalizer can be loaded and fit using Slideflow:
```
normalizer = sf.util.get_model_normalizer('/path/')
```
The stain normalizer has a native Tensorflow transform and can be directly applied to a tf.data.Dataset:
```
# Map the stain normalizer transformation
# to a tf.data.Dataset
dataset = dataset.map(normalizer.tf_to_tf)
```
Alternatively, the model can be used to generate predictions for whole-slide images processed through Slideflow in an end-to-end [Project](https://slideflow.dev/project_setup.html). To use the model to generate predictions on data processed with Slideflow, simply pass the model to the [`Project.predict()`](https://slideflow.dev/project.html#slideflow.Project.predict) function:
```
import slideflow as sf
P = sf.Project('/path/to/slideflow/project')
P.predict('/model/path')
```
## Direct Use
This model is intended for research purposes only. Possible research areas and tasks include
- Applications in educational settings.
- Research on pathology classification models for thyroid neoplasms.
Excluded uses are described below.
### Misuse and Out-of-Scope Use
This model should not be used in a clinical setting to generate predictions that will be used to inform patients, physicians, or any other health care members directly involved in their health care outside the context of an approved research protocol. Using the model in a clinical setting outside the context of an approved research protocol is a misuse of this model. This includes, but is not limited to:
- Generating predictions of images from a patient's tumor and sharing those predictions with the patient
- Generating predictions of images from a patient's tumor and sharing those predictions with the patient's physician, or other members of the patient's healthcare team
- Influencing a patient's health care treatment in any way based on output from this model
### Limitations
The model has not been validated in contexts where non-thyroid neoplasms, or rare thyroid subtypes such as anaplastic thyroid carcinoma, are possible.
### Bias
This model was trained on The Cancer Genome Atlas (TCGA), which contains patient data from communities and cultures which may not reflect the general population. This dataset is composed of images from multiple institutions, which may introduce a potential source of bias from site-specific batch effects ([Howard, 2021](https://www.nature.com/articles/s41467-021-24698-1)).
## Training
**Training Data**
The following dataset was used to train the model:
- The Cancer Genome Atlas (TCGA), THCA cohort (see next section)
This model was trained on a total of 369 slides, with 116 BRAF-like tumors and 271 RAS-like tumors.
**Training Procedure**
Each whole-slide image was sectioned into smaller images in a grid-wise fashion in order to extract tiles from whole-slide images at 302 x 302 μm. Image tiles were extracted at the nearest downsample layer, and resized to 299 x 299 px using [Libvips](https://www.libvips.org/API/current/libvips-resample.html#vips-resize). During training,
- Images are stain-normalized with a modified Reinhard normalizer ("Reinhard-Fast"), which excludes the brightness standardization step, available [here](https://github.com/jamesdolezal/slideflow/blob/master/slideflow/norm/tensorflow/reinhard.py)
- Images are randomly flipped and rotated (90, 180, 270)
- Images have a 50% chance of being JPEG compressed with quality level between 50-100%
- Images have a 10% chance of random Gaussian blur, with sigma between 0.5-2.0
- Images are standardized with `tf.image.per_image_standardization()`
- Images are classified through an Xception block, followed by two hidden layers with dropout (p=0.1) enabled during training
- The loss is mean squared error using the linear outcome BRS
- Training is completed after 1 epoch
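A rough TensorFlow sketch of the augmentation pipeline above is shown below. This is not the training code used for the published model; it assumes float32 images scaled to [0, 1] prior to standardization and uses `tensorflow_addons` for the Gaussian blur.
```
import tensorflow as tf
import tensorflow_addons as tfa  # assumed available for the Gaussian blur

def augment(image):
    # Random flips and 90/180/270-degree rotations
    image = tf.image.random_flip_left_right(image)
    image = tf.image.random_flip_up_down(image)
    image = tf.image.rot90(image, k=tf.random.uniform([], 0, 4, dtype=tf.int32))

    # 50% chance of JPEG re-compression at quality 50-100%
    image = tf.cond(
        tf.random.uniform([]) < 0.5,
        lambda: tf.image.random_jpeg_quality(image, 50, 100),
        lambda: image)

    # 10% chance of Gaussian blur (the published pipeline samples sigma in 0.5-2.0)
    image = tf.cond(
        tf.random.uniform([]) < 0.1,
        lambda: tfa.image.gaussian_filter2d(image, filter_shape=5, sigma=1.0),
        lambda: image)

    # Final standardization, as in the published pipeline
    return tf.image.per_image_standardization(image)

# dataset = dataset.map(augment, num_parallel_calls=tf.data.AUTOTUNE)
```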
Additional training information:
- **Hardware:** 1 x A100 GPUs
- **Optimizer:** Adam
- **Batch:** 128
- **Learning rate:** 0.0001, with a decay of 0.98 every 512 steps
- **Hidden layers:** 2 hidden layers of width 1024, with dropout p=0.1
## Evaluation Results
External evaluation results are currently under peer review and will be posted once publicly available. |
CoffeeAddict93/gpt2-medium-modest-proposal | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
]
| text-generation | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 7 | null | ---
language:
- en
tags:
- stable-diffusion
- text-to-image
license: creativeml-openrail-m
inference: false
--- |
CogComp/roberta-temporal-predictor | [
"pytorch",
"roberta",
"fill-mask",
"arxiv:2202.00436",
"transformers",
"license:mit",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"RobertaForMaskedLM"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 14 | null | ---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- accuracy
model-index:
- name: TSE_BERT_5E
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# TSE_BERT_5E
This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.3664
- Accuracy: 0.9267
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.6836 | 0.06 | 50 | 0.5614 | 0.8267 |
| 0.4679 | 0.12 | 100 | 0.3521 | 0.9 |
| 0.3325 | 0.17 | 150 | 0.2747 | 0.8933 |
| 0.2493 | 0.23 | 200 | 0.2712 | 0.9067 |
| 0.273 | 0.29 | 250 | 0.2304 | 0.9333 |
| 0.2888 | 0.35 | 300 | 0.2253 | 0.92 |
| 0.2558 | 0.4 | 350 | 0.2110 | 0.9267 |
| 0.1997 | 0.46 | 400 | 0.2206 | 0.9267 |
| 0.2748 | 0.52 | 450 | 0.2358 | 0.9267 |
| 0.2448 | 0.58 | 500 | 0.2942 | 0.8933 |
| 0.2247 | 0.63 | 550 | 0.2410 | 0.9067 |
| 0.2002 | 0.69 | 600 | 0.2222 | 0.9133 |
| 0.2668 | 0.75 | 650 | 0.2372 | 0.9133 |
| 0.2701 | 0.81 | 700 | 0.2288 | 0.9333 |
| 0.2034 | 0.87 | 750 | 0.2415 | 0.9267 |
| 0.2374 | 0.92 | 800 | 0.2278 | 0.92 |
| 0.2305 | 0.98 | 850 | 0.2270 | 0.92 |
| 0.1704 | 1.04 | 900 | 0.2591 | 0.9333 |
| 0.1826 | 1.1 | 950 | 0.2481 | 0.9267 |
| 0.1116 | 1.15 | 1000 | 0.2906 | 0.9133 |
| 0.1527 | 1.21 | 1050 | 0.2902 | 0.92 |
| 0.1692 | 1.27 | 1100 | 0.2489 | 0.9333 |
| 0.158 | 1.33 | 1150 | 0.2576 | 0.9333 |
| 0.1608 | 1.38 | 1200 | 0.3344 | 0.9267 |
| 0.1194 | 1.44 | 1250 | 0.3615 | 0.9267 |
| 0.201 | 1.5 | 1300 | 0.3374 | 0.92 |
| 0.1938 | 1.56 | 1350 | 0.2847 | 0.92 |
| 0.1479 | 1.61 | 1400 | 0.3044 | 0.9267 |
| 0.1628 | 1.67 | 1450 | 0.2980 | 0.9267 |
| 0.1783 | 1.73 | 1500 | 0.3132 | 0.9267 |
| 0.1885 | 1.79 | 1550 | 0.2676 | 0.9333 |
| 0.1651 | 1.85 | 1600 | 0.2709 | 0.9333 |
| 0.1376 | 1.9 | 1650 | 0.2777 | 0.94 |
| 0.1571 | 1.96 | 1700 | 0.2761 | 0.9333 |
| 0.1561 | 2.02 | 1750 | 0.2912 | 0.94 |
| 0.1187 | 2.08 | 1800 | 0.2893 | 0.9467 |
| 0.1205 | 2.13 | 1850 | 0.2882 | 0.9467 |
| 0.0751 | 2.19 | 1900 | 0.3032 | 0.9467 |
| 0.1412 | 2.25 | 1950 | 0.2926 | 0.9467 |
| 0.0783 | 2.31 | 2000 | 0.2962 | 0.9467 |
| 0.1094 | 2.36 | 2050 | 0.2909 | 0.9333 |
| 0.1158 | 2.42 | 2100 | 0.3087 | 0.9333 |
| 0.0606 | 2.48 | 2150 | 0.3102 | 0.9467 |
| 0.1164 | 2.54 | 2200 | 0.2812 | 0.94 |
| 0.1311 | 2.6 | 2250 | 0.3736 | 0.9267 |
| 0.1087 | 2.65 | 2300 | 0.3069 | 0.94 |
| 0.109 | 2.71 | 2350 | 0.3176 | 0.94 |
| 0.0789 | 2.77 | 2400 | 0.3130 | 0.94 |
| 0.0784 | 2.83 | 2450 | 0.3338 | 0.94 |
| 0.1388 | 2.88 | 2500 | 0.3440 | 0.9333 |
| 0.1062 | 2.94 | 2550 | 0.2883 | 0.94 |
| 0.1016 | 3.0 | 2600 | 0.2776 | 0.94 |
| 0.0642 | 3.06 | 2650 | 0.3302 | 0.9333 |
| 0.052 | 3.11 | 2700 | 0.3217 | 0.94 |
| 0.0539 | 3.17 | 2750 | 0.3899 | 0.9267 |
| 0.0593 | 3.23 | 2800 | 0.3283 | 0.9467 |
| 0.0468 | 3.29 | 2850 | 0.3382 | 0.9467 |
| 0.0546 | 3.34 | 2900 | 0.3133 | 0.9467 |
| 0.107 | 3.4 | 2950 | 0.3550 | 0.94 |
| 0.1079 | 3.46 | 3000 | 0.3484 | 0.94 |
| 0.0782 | 3.52 | 3050 | 0.3313 | 0.94 |
| 0.0635 | 3.58 | 3100 | 0.3418 | 0.94 |
| 0.0771 | 3.63 | 3150 | 0.3685 | 0.9333 |
| 0.0629 | 3.69 | 3200 | 0.3467 | 0.9333 |
| 0.0552 | 3.75 | 3250 | 0.3677 | 0.94 |
| 0.0531 | 3.81 | 3300 | 0.3436 | 0.9333 |
| 0.0819 | 3.86 | 3350 | 0.3802 | 0.9333 |
| 0.0583 | 3.92 | 3400 | 0.3441 | 0.9333 |
| 0.0434 | 3.98 | 3450 | 0.3666 | 0.9333 |
| 0.0747 | 4.04 | 3500 | 0.3554 | 0.9333 |
| 0.0309 | 4.09 | 3550 | 0.3582 | 0.9333 |
| 0.1057 | 4.15 | 3600 | 0.3615 | 0.9267 |
| 0.0391 | 4.21 | 3650 | 0.3583 | 0.9267 |
| 0.0433 | 4.27 | 3700 | 0.3514 | 0.9333 |
| 0.0597 | 4.33 | 3750 | 0.3580 | 0.9333 |
| 0.0663 | 4.38 | 3800 | 0.3390 | 0.94 |
| 0.0563 | 4.44 | 3850 | 0.3518 | 0.9267 |
| 0.0702 | 4.5 | 3900 | 0.3542 | 0.9267 |
| 0.0383 | 4.56 | 3950 | 0.3528 | 0.9267 |
| 0.0474 | 4.61 | 4000 | 0.3485 | 0.9333 |
| 0.0265 | 4.67 | 4050 | 0.3489 | 0.94 |
| 0.0165 | 4.73 | 4100 | 0.3616 | 0.9333 |
| 0.0489 | 4.79 | 4150 | 0.3579 | 0.9333 |
| 0.0478 | 4.84 | 4200 | 0.3603 | 0.9333 |
| 0.0536 | 4.9 | 4250 | 0.3666 | 0.9267 |
| 0.0551 | 4.96 | 4300 | 0.3664 | 0.9267 |
### Framework versions
- Transformers 4.24.0
- Pytorch 1.13.0
- Datasets 2.3.2
- Tokenizers 0.13.1
|
CohleM/bert-nepali-tokenizer | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
license: gpl-3.0
---
# Thyroid BRAF-RAS Score (BRS) GAN v1 Model Card
This model card describes a model associated with a manuscript that is currently under review. Links to the manuscript will be provided once publicly available.
## Model Details
- **Developed by:** James Dolezal
- **Model type:** Generative adversarial network
- **Language(s):** English
- **License:** GPL-3.0
- **Model Description:** This is a StyleGAN2 model that can generate synthetic H&E pathologic images of thyroid neoplasms. The GAN is conditioned on discretized BRAF-RAS Score (BRS), a gene expression score scaled from -1 (BRAF-like) to +1 (RAS-like) indicating how similar a tumor's gene expression is to that of a BRAF-mutant or RAS-mutant tumor. The GAN has been conditioned on the categories BRAF-like (=0) and RAS-like (=1).
- **Image processing:** This model generates images at 512 x 512 px resolution and was trained on lossless (PNG) pathologic images at 302 x 302 μm magnification.
- **Resources for more information:** [GitHub Repository](https://github.com/jamesdolezal/histologic-sheep)
# Uses
## Examples
This model is a [StyleGAN2](https://github.com/NVlabs/stylegan3) model and can be used with any StyleGAN-compatible scripts and tools. The [GitHub repository](https://github.com/jamesdolezal/histologic-sheep) associated with this model includes detailed information on how to interface with the GAN, generate images, and perform class blending via embedding interpolation.
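As a concrete illustration, a synthetic image of a chosen class could be sampled with the standard StyleGAN2/StyleGAN3 PyTorch API. This is a minimal sketch, not code from the associated repository: it assumes the NVIDIA StyleGAN code base (`dnnlib`, `torch_utils`) is on the Python path, a CUDA GPU is available, and `thyroid_brs_gan.pkl` is a placeholder for the actual network pickle.
```python
import pickle
import torch

# Placeholder path -- substitute the network pickle distributed with this model
with open("thyroid_brs_gan.pkl", "rb") as f:
    G = pickle.load(f)["G_ema"].cuda()   # conditional StyleGAN2 generator

z = torch.randn([1, G.z_dim]).cuda()     # random latent code
c = torch.zeros([1, G.c_dim]).cuda()     # one-hot class label
c[:, 1] = 1.0                            # index 0 = BRAF-like, index 1 = RAS-like
img = G(z, c, truncation_psi=1.0)        # NCHW float32 in [-1, +1]
```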
## Direct Use
This model is intended for research purposes only. Possible research areas and tasks include
- Applications in educational settings.
- Research on pathology classification models for thyroid neoplasms.
Excluded uses are described below.
### Misuse and Out-of-Scope Use
Output from this model should not be used in a clinical setting or be provided to patients, physicians, or any other health care members directly involved in their health care outside the context of an approved research protocol. Using the model in a clinical setting outside the context of an approved research protocol is a misuse of this model. This includes influencing a patient's health care treatment in any way based on output from this model.
### Limitations
The model has not been validated in contexts where non-thyroid neoplasms, or rare thyroid subtypes such as anaplastic thyroid carcinoma, are possible.
### Bias
This model was trained on The Cancer Genome Atlas (TCGA), which contains patient data from communities and cultures which may not reflect the general population. This dataset is composed of images from multiple institutions, which may introduce a potential source of bias from site-specific batch effects ([Howard, 2021](https://www.nature.com/articles/s41467-021-24698-1)).
## Training
**Training Data**
The following dataset was used to train the model:
- The Cancer Genome Atlas (TCGA), THCA cohort (see next section)
This model was trained on a total of 369 slides, with 116 BRAF-like tumors and 271 RAS-like tumors.
**Training Procedure**
Each whole-slide image was sectioned into smaller images in a grid-wise fashion in order to extract tiles from whole-slide images at 302 x 302 μm. Image tiles were extracted at the nearest downsample layer, and resized to 512 x 512 px using [Libvips](https://www.libvips.org/API/current/libvips-resample.html#vips-resize). During training, images are randomly flipped and rotated (90, 180, 270). Training is otherwise identical to the official StyleGAN2 implementation.
Additional training information:
- **Hardware:** 2 x A100 GPUs
- **Batch size:** 16
- **R1 gamma:** 3.2768
- **Training time:** 12,720 kimg
## Evaluation Results
External evaluation results are currently under peer review and will be posted once publicly available. |
Coldestadam/Breakout_Mentors_SpongeBob_Model | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
]
| text-generation | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 10 | null | ---
license: creativeml-openrail-m
---
A Stable Diffusion model trained with DreamBooth to create pixel art, in two styles.
The sprite art can be generated with the trigger word "pixelsprite".
The scene art can be generated with the trigger word "16bitscene".
The art is not pixel-perfect, but it can be fixed with pixelating tools like https://pinetools.com/pixelate-effect-image (they also support bulk pixelation).
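The checkpoint can be loaded like any Stable Diffusion model with 🤗 diffusers. A minimal sketch (the repo id below is a placeholder for this model's actual id):
```python
import torch
from diffusers import StableDiffusionPipeline

# "path/to/this-model" is a placeholder -- substitute this checkpoint's repo id or local path
pipe = StableDiffusionPipeline.from_pretrained("path/to/this-model", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

image = pipe("a knight holding a shield, pixelsprite").images[0]
image.save("knight_sprite.png")
```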
Some example generations:







|
ComCom/gpt2-large | [
"pytorch",
"gpt2",
"feature-extraction",
"transformers"
]
| feature-extraction | {
"architectures": [
"GPT2Model"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 1 | null | ---
language: en
thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true
tags:
- huggingtweets
widget:
- text: "My dream is"
---
<div class="inline-flex flex-col" style="line-height: 1.5;">
<div class="flex">
<div
style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('https://pbs.twimg.com/profile_images/1578486587171782656/vX6FFz3G_400x400.jpg')">
</div>
<div
style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('')">
</div>
<div
style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('')">
</div>
</div>
<div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div>
<div style="text-align: center; font-size: 16px; font-weight: 800">angelicism0666</div>
<div style="text-align: center; font-size: 14px;">@angelicism0666</div>
</div>
I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets).
Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)!
## How does it work?
The model uses the following pipeline.

To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI).
## Training data
The model was trained on tweets from angelicism0666.
| Data | angelicism0666 |
| --- | --- |
| Tweets downloaded | 1459 |
| Retweets | 442 |
| Short tweets | 346 |
| Tweets kept | 671 |
[Explore the data](https://wandb.ai/wandb/huggingtweets/runs/1jmeiayq/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.
## Training procedure
The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @angelicism0666's tweets.
Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/1xz6slmm) for full transparency and reproducibility.
At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/1xz6slmm/artifacts) is logged and versioned.
## How to use
You can use this model directly with a pipeline for text generation:
```python
from transformers import pipeline
generator = pipeline('text-generation',
model='huggingtweets/angelicism0666')
generator("My dream is", num_return_sequences=5)
```
## Limitations and bias
The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias).
In addition, the data present in the user's tweets further affects the text generated by the model.
## About
*Built by Boris Dayma*
[](https://twitter.com/intent/follow?screen_name=borisdayma)
For more details, visit the project repository.
[](https://github.com/borisdayma/huggingtweets)
|
ComCom/gpt2 | [
"pytorch",
"gpt2",
"feature-extraction",
"transformers"
]
| feature-extraction | {
"architectures": [
"GPT2Model"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 1 | null | ---
language:
- en
license: mit
tags:
- generated_from_trainer
datasets:
- tomekkorbak/pii-pile-chunk3-0-50000
- tomekkorbak/pii-pile-chunk3-50000-100000
- tomekkorbak/pii-pile-chunk3-100000-150000
- tomekkorbak/pii-pile-chunk3-150000-200000
- tomekkorbak/pii-pile-chunk3-200000-250000
- tomekkorbak/pii-pile-chunk3-250000-300000
- tomekkorbak/pii-pile-chunk3-300000-350000
- tomekkorbak/pii-pile-chunk3-350000-400000
- tomekkorbak/pii-pile-chunk3-400000-450000
- tomekkorbak/pii-pile-chunk3-450000-500000
- tomekkorbak/pii-pile-chunk3-500000-550000
- tomekkorbak/pii-pile-chunk3-550000-600000
- tomekkorbak/pii-pile-chunk3-600000-650000
- tomekkorbak/pii-pile-chunk3-650000-700000
- tomekkorbak/pii-pile-chunk3-700000-750000
- tomekkorbak/pii-pile-chunk3-750000-800000
- tomekkorbak/pii-pile-chunk3-800000-850000
- tomekkorbak/pii-pile-chunk3-850000-900000
- tomekkorbak/pii-pile-chunk3-900000-950000
- tomekkorbak/pii-pile-chunk3-950000-1000000
- tomekkorbak/pii-pile-chunk3-1000000-1050000
- tomekkorbak/pii-pile-chunk3-1050000-1100000
- tomekkorbak/pii-pile-chunk3-1100000-1150000
- tomekkorbak/pii-pile-chunk3-1150000-1200000
- tomekkorbak/pii-pile-chunk3-1200000-1250000
- tomekkorbak/pii-pile-chunk3-1250000-1300000
- tomekkorbak/pii-pile-chunk3-1300000-1350000
- tomekkorbak/pii-pile-chunk3-1350000-1400000
- tomekkorbak/pii-pile-chunk3-1400000-1450000
- tomekkorbak/pii-pile-chunk3-1450000-1500000
- tomekkorbak/pii-pile-chunk3-1500000-1550000
- tomekkorbak/pii-pile-chunk3-1550000-1600000
- tomekkorbak/pii-pile-chunk3-1600000-1650000
- tomekkorbak/pii-pile-chunk3-1650000-1700000
- tomekkorbak/pii-pile-chunk3-1700000-1750000
- tomekkorbak/pii-pile-chunk3-1750000-1800000
- tomekkorbak/pii-pile-chunk3-1800000-1850000
- tomekkorbak/pii-pile-chunk3-1850000-1900000
- tomekkorbak/pii-pile-chunk3-1900000-1950000
model-index:
- name: tomekkorbak/test-pii-2533
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# tomekkorbak/test-pii-2533
This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on the tomekkorbak/pii-pile-chunk3-0-50000, the tomekkorbak/pii-pile-chunk3-50000-100000, the tomekkorbak/pii-pile-chunk3-100000-150000, the tomekkorbak/pii-pile-chunk3-150000-200000, the tomekkorbak/pii-pile-chunk3-200000-250000, the tomekkorbak/pii-pile-chunk3-250000-300000, the tomekkorbak/pii-pile-chunk3-300000-350000, the tomekkorbak/pii-pile-chunk3-350000-400000, the tomekkorbak/pii-pile-chunk3-400000-450000, the tomekkorbak/pii-pile-chunk3-450000-500000, the tomekkorbak/pii-pile-chunk3-500000-550000, the tomekkorbak/pii-pile-chunk3-550000-600000, the tomekkorbak/pii-pile-chunk3-600000-650000, the tomekkorbak/pii-pile-chunk3-650000-700000, the tomekkorbak/pii-pile-chunk3-700000-750000, the tomekkorbak/pii-pile-chunk3-750000-800000, the tomekkorbak/pii-pile-chunk3-800000-850000, the tomekkorbak/pii-pile-chunk3-850000-900000, the tomekkorbak/pii-pile-chunk3-900000-950000, the tomekkorbak/pii-pile-chunk3-950000-1000000, the tomekkorbak/pii-pile-chunk3-1000000-1050000, the tomekkorbak/pii-pile-chunk3-1050000-1100000, the tomekkorbak/pii-pile-chunk3-1100000-1150000, the tomekkorbak/pii-pile-chunk3-1150000-1200000, the tomekkorbak/pii-pile-chunk3-1200000-1250000, the tomekkorbak/pii-pile-chunk3-1250000-1300000, the tomekkorbak/pii-pile-chunk3-1300000-1350000, the tomekkorbak/pii-pile-chunk3-1350000-1400000, the tomekkorbak/pii-pile-chunk3-1400000-1450000, the tomekkorbak/pii-pile-chunk3-1450000-1500000, the tomekkorbak/pii-pile-chunk3-1500000-1550000, the tomekkorbak/pii-pile-chunk3-1550000-1600000, the tomekkorbak/pii-pile-chunk3-1600000-1650000, the tomekkorbak/pii-pile-chunk3-1650000-1700000, the tomekkorbak/pii-pile-chunk3-1700000-1750000, the tomekkorbak/pii-pile-chunk3-1750000-1800000, the tomekkorbak/pii-pile-chunk3-1800000-1850000, the tomekkorbak/pii-pile-chunk3-1850000-1900000 and the tomekkorbak/pii-pile-chunk3-1900000-1950000 datasets.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.1
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 8
- total_train_batch_size: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.01
- training_steps: 1
- mixed_precision_training: Native AMP
### Framework versions
- Transformers 4.20.1
- Pytorch 1.11.0+cu113
- Datasets 2.5.1
- Tokenizers 0.11.6
|
ComCom-Dev/gpt2-bible-test | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | # IS2Project
This is a customer sentiment analysis model for code-switched language: a case study of Safaricom Limited. The proposed model will detect customer sentiment in the code-switched pair (English-Swahili) for Safaricom users using Support Vector Machines, categorizing tweets into good reviews and bad reviews.
The model is also compared with Logistic Regression and Naive Bayes to see which model performs best.
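A minimal sketch of such a comparison with scikit-learn is shown below; the example tweets, labels, and feature choices are invented for illustration only and are not the project's actual data or code.
```python
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC

# Hypothetical code-switched tweets: 1 = good review, 0 = bad review
tweets = ["Asante Safaricom, huduma iko poa sana", "Network imekuwa mbaya the whole day"]
labels = [1, 0]

models = {
    "SVM": LinearSVC(),
    "Logistic Regression": LogisticRegression(max_iter=1000),
    "Naive Bayes": MultinomialNB(),
}

for name, clf in models.items():
    pipeline = make_pipeline(TfidfVectorizer(ngram_range=(1, 2)), clf)
    pipeline.fit(tweets, labels)
    print(name, pipeline.predict(["Huduma yenu ni poor sana"]))
```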
|
Cometasonmi451/Mine | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
language:
- fr
license: apache-2.0
tags:
- hf-asr-leaderboard
- generated_from_trainer
datasets:
- mozilla-foundation/common_voice_11_0
model-index:
- name: Whisper Small Fri - Despres Julien
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# Whisper Small Fri - Despres Julien
This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the Common Voice 11.0 dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-06
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 600
- training_steps: 6000
- mixed_precision_training: Native AMP
### Framework versions
- Transformers 4.25.0.dev0
- Pytorch 1.11.0
- Datasets 2.5.2
- Tokenizers 0.12.1
|
Connor/DialoGPT-small-rick | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
]
| conversational | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": 1000
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 7 | null | ---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
---
# {MODEL_NAME}
This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search.
<!--- Describe your model here -->
## Usage (Sentence-Transformers)
Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:
```
pip install -U sentence-transformers
```
Then you can use the model like this:
```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]
model = SentenceTransformer('{MODEL_NAME}')
embeddings = model.encode(sentences)
print(embeddings)
```
## Evaluation Results
<!--- Describe how your model was evaluated -->
For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME})
## Training
The model was trained with the parameters:
**DataLoader**:
`torch.utils.data.dataloader.DataLoader` of length 88 with parameters:
```
{'batch_size': 64, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```
**Loss**:
`sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss`
Parameters of the fit()-Method:
```
{
"epochs": 1,
"evaluation_steps": 0,
"evaluator": "NoneType",
"max_grad_norm": 1,
"optimizer_class": "<class 'torch.optim.adamw.AdamW'>",
"optimizer_params": {
"lr": 2e-05
},
"scheduler": "WarmupLinear",
"steps_per_epoch": 88,
"warmup_steps": 9,
"weight_decay": 0.01
}
```
## Full Model Architecture
```
SentenceTransformer(
(0): Transformer({'max_seq_length': 384, 'do_lower_case': False}) with Transformer model: MPNetModel
(1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
(2): Normalize()
)
```
## Citing & Authors
<!--- Describe where people can find more information --> |
Connorvr/TeachingGen | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"generated_from_trainer",
"license:mit"
]
| text-generation | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 4 | null | ---
license: mit
tags:
- generated_from_trainer
datasets:
- squad
model-index:
- name: deberta-base-finetuned-squad-pruned0.1
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# deberta-base-finetuned-squad-pruned0.1
This model is a fine-tuned version of [microsoft/deberta-base](https://huggingface.co/microsoft/deberta-base) on the squad dataset.
It achieves the following results on the evaluation set:
- Loss: 2.3741
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 2.4425 | 1.0 | 5533 | 2.3741 |
### Framework versions
- Transformers 4.24.0
- Pytorch 1.12.1+cu113
- Datasets 2.6.1
- Tokenizers 0.13.2
|
Contrastive-Tension/BERT-Base-CT-STSb | [
"pytorch",
"tf",
"jax",
"bert",
"feature-extraction",
"transformers"
]
| feature-extraction | {
"architectures": [
"BertModel"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 5 | null | ---
license: mit
tags:
- generated_from_trainer
model-index:
- name: BERiT_52000
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# BERiT_52000
This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 8.6394
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 8.9728 | 0.19 | 500 | 8.6854 |
| 8.7387 | 0.39 | 1000 | 8.7712 |
| 8.6739 | 0.58 | 1500 | 8.7362 |
| 8.786 | 0.77 | 2000 | 8.7816 |
| 8.6918 | 0.97 | 2500 | 8.6802 |
| 8.595 | 1.16 | 3000 | 8.7086 |
| 8.5342 | 1.36 | 3500 | 8.6558 |
| 8.6484 | 1.55 | 4000 | 8.7442 |
| 8.5594 | 1.74 | 4500 | 8.7238 |
| 8.4791 | 1.94 | 5000 | 8.7073 |
| 8.4489 | 2.13 | 5500 | 8.6470 |
| 8.42 | 2.32 | 6000 | 8.7016 |
| 8.4389 | 2.52 | 6500 | 8.6039 |
| 8.5176 | 2.71 | 7000 | 8.6179 |
| 8.5392 | 2.9 | 7500 | 8.6394 |
### Framework versions
- Transformers 4.24.0
- Pytorch 1.12.1+cu113
- Datasets 2.6.1
- Tokenizers 0.13.2
|
Contrastive-Tension/BERT-Base-Swe-CT-STSb | [
"pytorch",
"tf",
"jax",
"bert",
"feature-extraction",
"transformers"
]
| feature-extraction | {
"architectures": [
"BertModel"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 126 | null | ---
license: gpl-3.0
---
# Breast Estrogen Receptor (ER) GAN v1 Model Card
This model card describes a model associated with a manuscript that is currently under review. Links to the manuscript will be provided once publicly available.
## Model Details
- **Developed by:** James Dolezal
- **Model type:** Generative adversarial network
- **Language(s):** English
- **License:** GPL-3.0
- **Model Description:** This is a StyleGAN2 model that can generate synthetic H&E pathologic images of breast cancer. The GAN is conditioned on estrogen receptor (ER) status as determined by immunohistochemical testing, with categories ER-negative (=0) and ER-positive (=1).
- **Image processing:** This model generates images at 512 x 512 px resolution and was trained on lossless (PNG) pathologic images at 400 x 400 μm magnification.
- **Resources for more information:** [GitHub Repository](https://github.com/jamesdolezal/histologic-sheep)
# Uses
## Examples
This model is a [StyleGAN2](https://github.com/NVlabs/stylegan3) model and can be used with any StyleGAN-compatible scripts and tools. The [GitHub repository](https://github.com/jamesdolezal/histologic-sheep) associated with this model includes detailed information on how to interface with the GAN, generate images, and perform class blending via embedding interpolation.
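For example, class blending via embedding interpolation could be approximated by mixing the intermediate latents produced for each class label. This is a hedged sketch under the same assumptions as the official StyleGAN2/StyleGAN3 PyTorch code base (`dnnlib`/`torch_utils` importable, CUDA GPU available); `breast_er_gan.pkl` is a placeholder path.
```python
import pickle
import torch

with open("breast_er_gan.pkl", "rb") as f:       # placeholder path
    G = pickle.load(f)["G_ema"].cuda()

z = torch.randn([1, G.z_dim]).cuda()
c_neg = torch.tensor([[1.0, 0.0]]).cuda()        # ER-negative (=0)
c_pos = torch.tensor([[0.0, 1.0]]).cuda()        # ER-positive (=1)

w_neg = G.mapping(z, c_neg)                      # [1, num_ws, w_dim]
w_pos = G.mapping(z, c_pos)

# Blend from ER-negative to ER-positive with the same latent code
for alpha in (0.0, 0.25, 0.5, 0.75, 1.0):
    w = (1 - alpha) * w_neg + alpha * w_pos
    img = G.synthesis(w, noise_mode="const")     # NCHW float32 in [-1, +1]
```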
## Direct Use
This model is intended for research purposes only. Possible research areas and tasks include
- Applications in educational settings.
- Research on pathology classification models for breast cancer.
Excluded uses are described below.
### Misuse and Out-of-Scope Use
Output from this model should not be used in a clinical setting or be provided to patients, physicians, or any other health care members directly involved in their health care outside the context of an approved research protocol. Using the model in a clinical setting outside the context of an approved research protocol is a misuse of this model. This includes influencing a patient's health care treatment in any way based on output from this model.
### Limitations
The model does not generate images reflective of estrogen receptor status in a manner which controls for possible underlying biological bias, such as tumor grade or histological subtype.
### Bias
This model was trained on The Cancer Genome Atlas (TCGA), which contains patient data from communities and cultures which may not reflect the general population. This dataset is composed of images from multiple institutions, which may introduce a potential source of bias from site-specific batch effects ([Howard, 2021](https://www.nature.com/articles/s41467-021-24698-1)).
## Training
**Training Data**
The following dataset was used to train the model:
- The Cancer Genome Atlas (TCGA), BRCA cohort (see next section)
This model was trained on a total of 1,048 slides, with 228 ER-negative tumors and 820 ER-positive tumors.
**Training Procedure**
Each whole-slide image was sectioned into smaller images in a grid-wise fashion in order to extract tiles from whole-slide images at 400 x 400 μm. Image tiles were extracted at the nearest downsample layer, and resized to 512 x 512 px using [Libvips](https://www.libvips.org/API/current/libvips-resample.html#vips-resize). During training, images are randomly flipped and rotated (90, 180, 270). Training is otherwise identical to the official StyleGAN2 implementation.
Additional training information:
- **Hardware:** 4 x A100 GPUs
- **Batch size:** 32
- **R1 gamma:** 1.6384
- **Training time:** 10,000 kimg
## Evaluation Results
External evaluation results are currently under peer review and will be posted once publicly available. |
Contrastive-Tension/BERT-Distil-CT-STSb | [
"pytorch",
"tf",
"distilbert",
"feature-extraction",
"transformers"
]
| feature-extraction | {
"architectures": [
"DistilBertModel"
],
"model_type": "distilbert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 1 | null | ---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
---
# {MODEL_NAME}
This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search.
<!--- Describe your model here -->
## Usage (Sentence-Transformers)
Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:
```
pip install -U sentence-transformers
```
Then you can use the model like this:
```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]
model = SentenceTransformer('{MODEL_NAME}')
embeddings = model.encode(sentences)
print(embeddings)
```
## Evaluation Results
<!--- Describe how your model was evaluated -->
For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME})
## Training
The model was trained with the parameters:
**DataLoader**:
`torch.utils.data.dataloader.DataLoader` of length 88 with parameters:
```
{'batch_size': 64, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```
**Loss**:
`sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss`
Parameters of the fit()-Method:
```
{
"epochs": 1,
"evaluation_steps": 0,
"evaluator": "NoneType",
"max_grad_norm": 1,
"optimizer_class": "<class 'torch.optim.adamw.AdamW'>",
"optimizer_params": {
"lr": 2e-05
},
"scheduler": "WarmupLinear",
"steps_per_epoch": 88,
"warmup_steps": 9,
"weight_decay": 0.01
}
```
## Full Model Architecture
```
SentenceTransformer(
(0): Transformer({'max_seq_length': 384, 'do_lower_case': False}) with Transformer model: MPNetModel
(1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
(2): Normalize()
)
```
## Citing & Authors
<!--- Describe where people can find more information --> |
Contrastive-Tension/BERT-Distil-NLI-CT | [
"pytorch",
"tf",
"distilbert",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"DistilBertForMaskedLM"
],
"model_type": "distilbert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 6 | null | ---
license: gpl-3.0
---
# Lung Adeno/Squam GAN v1 Model Card
This model card describes a model associated with a manuscript that is currently under review. Links to the manuscript will be provided once publicly available.
## Model Details
- **Developed by:** James Dolezal
- **Model type:** Generative adversarial network
- **Language(s):** English
- **License:** GPL-3.0
- **Model Description:** This is a StyleGAN2 model that can generate synthetic H&E pathologic images of lung cancer. The GAN is conditioned on histologic subtype, with categories adenocarcinoma (=0) and squamous cell carcinoma (=1).
- **Image processing:** This model generates images at 512 x 512 px resolution and was trained on lossless (PNG) pathologic images at 400 x 400 μm magnification.
- **Resources for more information:** [GitHub Repository](https://github.com/jamesdolezal/histologic-sheep)
# Uses
## Examples
This model is a [StyleGAN2](https://github.com/NVlabs/stylegan3) model and can be used with any StyleGAN-compatible scripts and tools. The [GitHub repository](https://github.com/jamesdolezal/histologic-sheep) associated with this model includes detailed information on how to interface with the GAN, generate images, and perform class blending via embedding interpolation.
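For instance, a synthetic squamous cell carcinoma image could be sampled and written to disk as follows. This is a minimal sketch, not code from the associated repository: it assumes the NVIDIA StyleGAN PyTorch code base is importable, a CUDA GPU is available, and `lung_adeno_squam_gan.pkl` is a placeholder for the actual network pickle.
```python
import pickle
import PIL.Image
import torch

with open("lung_adeno_squam_gan.pkl", "rb") as f:    # placeholder path
    G = pickle.load(f)["G_ema"].cuda()

z = torch.randn([1, G.z_dim]).cuda()
c = torch.zeros([1, G.c_dim]).cuda()
c[:, 1] = 1.0                                        # 0 = adenocarcinoma, 1 = squamous
img = G(z, c, truncation_psi=0.7, noise_mode="const")

# Convert from [-1, +1] NCHW to uint8 HWC and save as PNG
img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
PIL.Image.fromarray(img[0].cpu().numpy(), "RGB").save("synthetic_lusc.png")
```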
## Direct Use
This model is intended for research purposes only. Possible research areas and tasks include
- Applications in educational settings.
- Research on pathology classification models for lung cancer.
Excluded uses are described below.
### Misuse and Out-of-Scope Use
Output from this model should not be used in a clinical setting or be provided to patients, physicians, or any other health care members directly involved in their health care outside the context of an approved research protocol. Using the model in a clinical setting outside the context of an approved research protocol is a misuse of this model. This includes influencing a patient's health care treatment in any way based on output from this model.
### Limitations
The training dataset did not include adenosquamous tumors, so intermediate states represented by the GAN through embedding interpolation may or may not be biologically consistent with the truly intermediate adenosquamous tumors.
### Bias
This model was trained on The Cancer Genome Atlas (TCGA), which contains patient data from communities and cultures which may not reflect the general population. This dataset is composed of images from multiple institutions, which may introduce a potential source of bias from site-specific batch effects ([Howard, 2021](https://www.nature.com/articles/s41467-021-24698-1)).
## Training
**Training Data**
The following dataset was used to train the model:
- The Cancer Genome Atlas (TCGA), LUAD (adenocarcinoma) and LUSC (squamous cell carcinoma) cohorts (see next section)
This model was trained on a total of 941 slides, with 467 adenocarcinomas and 474 squamous cell carcinomas.
**Training Procedure**
Each whole-slide image was sectioned into smaller images in a grid-wise fashion in order to extract tiles from whole-slide images at 400 x 400 μm. Image tiles were extracted at the nearest downsample layer, and resized to 512 x 512 px using [Libvips](https://www.libvips.org/API/current/libvips-resample.html#vips-resize). During training, images are randomly flipped and rotated (90, 180, 270). Training is otherwise identical to the official StyleGAN2 implementation.
Additional training information:
- **Hardware:** 4 x A100 GPUs
- **Batch size:** 32
- **R1 gamma:** 1.6384
- **Training time:** 25,000 kimg
## Evaluation Results
External evaluation results are currently under peer review and will be posted once publicly available. |
Contrastive-Tension/RoBerta-Large-CT-STSb | [
"pytorch",
"tf",
"jax",
"roberta",
"feature-extraction",
"transformers"
]
| feature-extraction | {
"architectures": [
"RobertaModel"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 5 | null | ---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- accuracy
model-index:
- name: TSE_ALBERT_5E
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# TSE_ALBERT_5E
This model is a fine-tuned version of [albert-base-v2](https://huggingface.co/albert-base-v2) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.3667
- Accuracy: 0.9333
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.5712 | 0.06 | 50 | 0.4047 | 0.82 |
| 0.3198 | 0.12 | 100 | 0.2883 | 0.9 |
| 0.3254 | 0.17 | 150 | 0.4352 | 0.84 |
| 0.2898 | 0.23 | 200 | 0.2892 | 0.9133 |
| 0.2826 | 0.29 | 250 | 0.3565 | 0.8867 |
| 0.2696 | 0.35 | 300 | 0.2263 | 0.9333 |
| 0.274 | 0.4 | 350 | 0.2068 | 0.94 |
| 0.2393 | 0.46 | 400 | 0.2270 | 0.9333 |
| 0.2067 | 0.52 | 450 | 0.2118 | 0.9333 |
| 0.2332 | 0.58 | 500 | 0.4454 | 0.88 |
| 0.3099 | 0.63 | 550 | 0.2777 | 0.9067 |
| 0.2687 | 0.69 | 600 | 0.2077 | 0.9333 |
| 0.2053 | 0.75 | 650 | 0.1923 | 0.9533 |
| 0.2359 | 0.81 | 700 | 0.3891 | 0.9067 |
| 0.2492 | 0.87 | 750 | 0.2765 | 0.9333 |
| 0.2589 | 0.92 | 800 | 0.1879 | 0.9467 |
| 0.2161 | 0.98 | 850 | 0.2733 | 0.9267 |
| 0.1752 | 1.04 | 900 | 0.3108 | 0.92 |
| 0.2213 | 1.1 | 950 | 0.3318 | 0.92 |
| 0.1665 | 1.15 | 1000 | 0.4124 | 0.8933 |
| 0.1832 | 1.21 | 1050 | 0.3448 | 0.92 |
| 0.1671 | 1.27 | 1100 | 0.3343 | 0.9067 |
| 0.184 | 1.33 | 1150 | 0.3929 | 0.9067 |
| 0.2788 | 1.38 | 1200 | 0.3888 | 0.8933 |
| 0.1768 | 1.44 | 1250 | 0.4028 | 0.9 |
| 0.2368 | 1.5 | 1300 | 0.3154 | 0.9133 |
| 0.2055 | 1.56 | 1350 | 0.2603 | 0.9267 |
| 0.1693 | 1.61 | 1400 | 0.2994 | 0.9267 |
| 0.1447 | 1.67 | 1450 | 0.3247 | 0.9267 |
| 0.226 | 1.73 | 1500 | 0.3410 | 0.9267 |
| 0.1744 | 1.79 | 1550 | 0.3105 | 0.9267 |
| 0.1943 | 1.85 | 1600 | 0.2760 | 0.94 |
| 0.2093 | 1.9 | 1650 | 0.2087 | 0.9467 |
| 0.2027 | 1.96 | 1700 | 0.2773 | 0.9333 |
| 0.1806 | 2.02 | 1750 | 0.3386 | 0.9267 |
| 0.1161 | 2.08 | 1800 | 0.4301 | 0.9067 |
| 0.0916 | 2.13 | 1850 | 0.3693 | 0.92 |
| 0.1586 | 2.19 | 1900 | 0.2929 | 0.94 |
| 0.1336 | 2.25 | 1950 | 0.4015 | 0.9133 |
| 0.1746 | 2.31 | 2000 | 0.3027 | 0.92 |
| 0.1353 | 2.36 | 2050 | 0.3224 | 0.9267 |
| 0.116 | 2.42 | 2100 | 0.3609 | 0.9267 |
| 0.1807 | 2.48 | 2150 | 0.3044 | 0.9267 |
| 0.1016 | 2.54 | 2200 | 0.3706 | 0.9133 |
| 0.0634 | 2.6 | 2250 | 0.3391 | 0.92 |
| 0.167 | 2.65 | 2300 | 0.3463 | 0.92 |
| 0.1718 | 2.71 | 2350 | 0.3254 | 0.92 |
| 0.1269 | 2.77 | 2400 | 0.2640 | 0.9333 |
| 0.1848 | 2.83 | 2450 | 0.2660 | 0.9267 |
| 0.116 | 2.88 | 2500 | 0.2532 | 0.94 |
| 0.1804 | 2.94 | 2550 | 0.3538 | 0.92 |
| 0.1315 | 3.0 | 2600 | 0.4146 | 0.9067 |
| 0.1024 | 3.06 | 2650 | 0.2899 | 0.9333 |
| 0.0904 | 3.11 | 2700 | 0.3191 | 0.9333 |
| 0.0596 | 3.17 | 2750 | 0.3569 | 0.9333 |
| 0.1144 | 3.23 | 2800 | 0.3373 | 0.9267 |
| 0.0782 | 3.29 | 2850 | 0.3447 | 0.9267 |
| 0.064 | 3.34 | 2900 | 0.2932 | 0.94 |
| 0.118 | 3.4 | 2950 | 0.3099 | 0.94 |
| 0.1286 | 3.46 | 3000 | 0.3404 | 0.9267 |
| 0.0963 | 3.52 | 3050 | 0.4026 | 0.9067 |
| 0.1158 | 3.58 | 3100 | 0.3320 | 0.9267 |
| 0.0967 | 3.63 | 3150 | 0.2984 | 0.94 |
| 0.1122 | 3.69 | 3200 | 0.3149 | 0.9333 |
| 0.134 | 3.75 | 3250 | 0.3804 | 0.9133 |
| 0.0953 | 3.81 | 3300 | 0.3670 | 0.92 |
| 0.0776 | 3.86 | 3350 | 0.4140 | 0.92 |
| 0.0813 | 3.92 | 3400 | 0.3654 | 0.9333 |
| 0.0406 | 3.98 | 3450 | 0.4364 | 0.92 |
| 0.0538 | 4.04 | 3500 | 0.3553 | 0.94 |
| 0.0734 | 4.09 | 3550 | 0.3814 | 0.9267 |
| 0.0396 | 4.15 | 3600 | 0.3978 | 0.9267 |
| 0.0427 | 4.21 | 3650 | 0.4333 | 0.92 |
| 0.1472 | 4.27 | 3700 | 0.3816 | 0.92 |
| 0.0587 | 4.33 | 3750 | 0.3624 | 0.92 |
| 0.0549 | 4.38 | 3800 | 0.3461 | 0.9333 |
| 0.0606 | 4.44 | 3850 | 0.3562 | 0.94 |
| 0.0483 | 4.5 | 3900 | 0.3655 | 0.9333 |
| 0.0351 | 4.56 | 3950 | 0.3613 | 0.9333 |
| 0.0763 | 4.61 | 4000 | 0.3641 | 0.94 |
| 0.0835 | 4.67 | 4050 | 0.3669 | 0.9333 |
| 0.0542 | 4.73 | 4100 | 0.3569 | 0.9333 |
| 0.0804 | 4.79 | 4150 | 0.3575 | 0.9333 |
| 0.0336 | 4.84 | 4200 | 0.3655 | 0.9333 |
| 0.0631 | 4.9 | 4250 | 0.3646 | 0.9333 |
| 0.0183 | 4.96 | 4300 | 0.3667 | 0.9333 |
### Framework versions
- Transformers 4.24.0
- Pytorch 1.13.0
- Datasets 2.3.2
- Tokenizers 0.13.1
|
Cooker/cicero-similis | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
license: apache-2.0
tags:
- generated_from_keras_callback
model-index:
- name: whisper_havest_0005
results: []
---
<!-- This model card has been generated automatically according to the information Keras had access to. You should
probably proofread and complete it, then remove this comment. -->
# whisper_havest_0005
This model is a fine-tuned version of [openai/whisper-tiny](https://huggingface.co/openai/whisper-tiny) on an unknown dataset.
It achieves the following results on the evaluation set:
- Train Loss: 6.4115
- Train Accuracy: 0.0115
- Train Do Wer: 1.0
- Validation Loss: 6.2357
- Validation Accuracy: 0.0115
- Validation Do Wer: 1.0
- Epoch: 4
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- optimizer: {'name': 'AdamWeightDecay', 'learning_rate': 1e-05, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False, 'weight_decay_rate': 0.01}
- training_precision: float32
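As a minimal sketch (not part of the generated card), the optimizer settings above could be reconstructed with the Keras utilities shipped in transformers; the checkpoint name comes from the card, while the compile step and all data handling are illustrative assumptions.
```python
# Sketch only: mirror the AdamWeightDecay configuration reported above.
from transformers import AdamWeightDecay, TFWhisperForConditionalGeneration

model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny")

optimizer = AdamWeightDecay(
    learning_rate=1e-05,
    weight_decay_rate=0.01,
    beta_1=0.9,
    beta_2=0.999,
    epsilon=1e-07,
)

# Keras models from transformers can compute their loss internally,
# so no explicit loss function is passed here.
model.compile(optimizer=optimizer)
```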
### Training results
| Train Loss | Train Accuracy | Train Do Wer | Validation Loss | Validation Accuracy | Validation Do Wer | Epoch |
|:----------:|:--------------:|:------------:|:---------------:|:-------------------:|:-----------------:|:-----:|
| 9.9191 | 0.0046 | 1.0 | 8.5836 | 0.0067 | 1.0 | 0 |
| 8.0709 | 0.0083 | 1.0 | 7.4667 | 0.0089 | 1.0 | 1 |
| 7.1652 | 0.0100 | 1.0 | 6.8204 | 0.0112 | 1.0 | 2 |
| 6.7196 | 0.0114 | 1.0 | 6.5192 | 0.0114 | 1.0 | 3 |
| 6.4115 | 0.0115 | 1.0 | 6.2357 | 0.0115 | 1.0 | 4 |
### Framework versions
- Transformers 4.25.0.dev0
- TensorFlow 2.9.2
- Datasets 2.6.1
- Tokenizers 0.13.2
|
Cool/Demo | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
title: Finetuned Diffusion
emoji: 🪄🖼️
colorFrom: red
colorTo: pink
sdk: gradio
sdk_version: 3.18.0
app_file: app.py
pinned: true
license: mit
---
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
Coolhand/Sentiment | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
license: mit
tags:
- generated_from_trainer
datasets:
- xtreme
metrics:
- f1
model-index:
- name: xlm-roberta-base-finetuned-panx-fr
results:
- task:
name: Token Classification
type: token-classification
dataset:
name: xtreme
type: xtreme
args: PAN-X.fr
metrics:
- name: F1
type: f1
value: 0.8346456692913387
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# xlm-roberta-base-finetuned-panx-fr
This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2763
- F1: 0.8346
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
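A rough sketch of how these settings map onto the Trainer API is shown below; it is illustrative only (dataset loading, tokenization, and the Trainer call are omitted), and the Adam betas/epsilon are simply the Trainer defaults.
```python
# Sketch only: the hyperparameters above expressed as TrainingArguments.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="xlm-roberta-base-finetuned-panx-fr",
    learning_rate=5e-05,
    per_device_train_batch_size=24,
    per_device_eval_batch_size=24,
    seed=42,
    lr_scheduler_type="linear",  # linear schedule, as listed above
    num_train_epochs=3,
)
```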
### Training results
| Training Loss | Epoch | Step | Validation Loss | F1 |
|:-------------:|:-----:|:----:|:---------------:|:------:|
| 0.5779 | 1.0 | 191 | 0.3701 | 0.7701 |
| 0.2735 | 2.0 | 382 | 0.2908 | 0.8254 |
| 0.1769 | 3.0 | 573 | 0.2763 | 0.8346 |
### Framework versions
- Transformers 4.11.3
- Pytorch 1.12.1+cu113
- Datasets 1.16.1
- Tokenizers 0.10.3
|
CopymySkill/DialoGPT-medium-atakan | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
]
| conversational | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": 1000
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 7 | null | ---
tags:
- CartPole-v1
- reinforce
- reinforcement-learning
- custom-implementation
- deep-rl-class
model-index:
- name: Reinforce-1
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: CartPole-v1
type: CartPole-v1
metrics:
- type: mean_reward
value: 156.20 +/- 43.02
name: mean_reward
verified: false
---
# **Reinforce** Agent playing **CartPole-v1**
This is a trained model of a **Reinforce** agent playing **CartPole-v1**.
To learn how to use this model and train your own, check out Unit 5 of the Deep Reinforcement Learning Class: https://github.com/huggingface/deep-rl-class/tree/main/unit5
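The card itself only links to the course material; as a hedged illustration, a minimal REINFORCE loop for CartPole-v1 is sketched below, assuming PyTorch and gymnasium. Hyperparameters and network size are placeholders, not the settings used to obtain the reported mean reward.
```python
# Minimal REINFORCE sketch (illustrative; not the script behind this checkpoint).
import gymnasium as gym
import torch
import torch.nn as nn
from torch.distributions import Categorical

env = gym.make("CartPole-v1")

# Small softmax policy over the two CartPole actions.
policy = nn.Sequential(
    nn.Linear(env.observation_space.shape[0], 16),
    nn.ReLU(),
    nn.Linear(16, env.action_space.n),
    nn.Softmax(dim=-1),
)
optimizer = torch.optim.Adam(policy.parameters(), lr=1e-2)
gamma = 0.99

for episode in range(500):
    obs, _ = env.reset()
    log_probs, rewards = [], []
    done = False
    while not done:
        dist = Categorical(policy(torch.as_tensor(obs, dtype=torch.float32)))
        action = dist.sample()
        obs, reward, terminated, truncated, _ = env.step(action.item())
        log_probs.append(dist.log_prob(action))
        rewards.append(reward)
        done = terminated or truncated

    # Discounted returns, computed backwards over the episode, then normalized.
    returns, g = [], 0.0
    for r in reversed(rewards):
        g = r + gamma * g
        returns.insert(0, g)
    returns = torch.tensor(returns)
    returns = (returns - returns.mean()) / (returns.std() + 1e-8)

    # Policy-gradient loss: maximize expected discounted return.
    loss = -(torch.stack(log_probs) * returns).sum()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
```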
|
Corvus/DialoGPT-medium-CaptainPrice-Extended | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
]
| conversational | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": 1000
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 7 | null | ---
license: apache-2.0
tags:
- generated_from_keras_callback
model-index:
- name: whisper_havest_0010
results: []
---
<!-- This model card has been generated automatically according to the information Keras had access to. You should
probably proofread and complete it, then remove this comment. -->
# whisper_havest_0010
This model is a fine-tuned version of [openai/whisper-tiny](https://huggingface.co/openai/whisper-tiny) on an unknown dataset.
It achieves the following results on the evaluation set:
- Train Loss: 5.1222
- Train Accuracy: 0.0117
- Train Do Wer: 1.0
- Validation Loss: 5.1600
- Validation Accuracy: 0.0117
- Validation Do Wer: 1.0
- Epoch: 9
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- optimizer: {'name': 'AdamWeightDecay', 'learning_rate': 1e-05, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False, 'weight_decay_rate': 0.01}
- training_precision: float32
### Training results
| Train Loss | Train Accuracy | Train Do Wer | Validation Loss | Validation Accuracy | Validation Do Wer | Epoch |
|:----------:|:--------------:|:------------:|:---------------:|:-------------------:|:-----------------:|:-----:|
| 9.9191 | 0.0046 | 1.0 | 8.5836 | 0.0067 | 1.0 | 0 |
| 8.0709 | 0.0083 | 1.0 | 7.4667 | 0.0089 | 1.0 | 1 |
| 7.1652 | 0.0100 | 1.0 | 6.8204 | 0.0112 | 1.0 | 2 |
| 6.7196 | 0.0114 | 1.0 | 6.5192 | 0.0114 | 1.0 | 3 |
| 6.4115 | 0.0115 | 1.0 | 6.2357 | 0.0115 | 1.0 | 4 |
| 6.1085 | 0.0115 | 1.0 | 5.9657 | 0.0115 | 1.0 | 5 |
| 5.8206 | 0.0115 | 1.0 | 5.7162 | 0.0115 | 1.0 | 6 |
| 5.5567 | 0.0115 | 1.0 | 5.4963 | 0.0115 | 1.0 | 7 |
| 5.3223 | 0.0116 | 1.0 | 5.3096 | 0.0116 | 1.0 | 8 |
| 5.1222 | 0.0117 | 1.0 | 5.1600 | 0.0117 | 1.0 | 9 |
### Framework versions
- Transformers 4.25.0.dev0
- TensorFlow 2.9.2
- Datasets 2.6.1
- Tokenizers 0.13.2
|
CouchCat/ma_mlc_v7_distil | [
"pytorch",
"distilbert",
"text-classification",
"en",
"transformers",
"multi-label",
"license:mit"
]
| text-classification | {
"architectures": [
"DistilBertForSequenceClassification"
],
"model_type": "distilbert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 29 | null | ---
language:
- en
tags:
- QA
license: cc-by-4.0
datasets:
- BoolQ
- CommonSenseQA
- DROP
- DuoRC
- HellaSWAG
- HotpotQA
- HybridQA
- NarrativeQA
- NaturalQuestionsShort
- NewsQA
- QAMR
- RACE
- SearchQA
- SIQA
- SQuAD
- TriviaQA-web
metrics:
- Accuracy
- Precision
- Recall
- F1
- MRR
- R@3
- R@5
---
BERT for Sequence Classification trained on the QA dataset prediction task.
- Input: a question.
- Output: the dataset that the question comes from.
Original paper: TWEAC: Transformer with Extendable QA Agent Classifiers
https://arxiv.org/abs/2104.07081
Datasets used for training:
```
list_datasets = ['BoolQ','CommonSenseQA','DROP','DuoRC','HellaSWAG','HotpotQA','HybridQA','NarrativeQA','NaturalQuestionsShort','NewsQA','QAMR','RACE','SearchQA','SIQA','SQuAD','TriviaQA-web']
```
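As a usage sketch (not from the original card), a question can be routed to one of these datasets with a standard sequence-classification forward pass; the checkpoint path below is a placeholder, and the label order is assumed to follow `list_datasets`, which may differ from the model's actual `id2label` mapping.
```python
# Illustrative only: classify a question into one of the 16 source datasets.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

checkpoint = "path/to/tweac-qa-agent-classifier"  # placeholder Hub id
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint)

question = "Who wrote the novel that the film is based on?"
inputs = tokenizer(question, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

predicted = list_datasets[logits.argmax(dim=-1).item()]  # assumes this label order
print(predicted)
```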
Results for all datasets:
- Accuracy: 0.7919096825783123
- Precision: 0.731586272892176
- Recall: 0.7919096825783123
- F1: 0.7494425609552463
- MRR: 0.8720871733637521
- R@3: 0.9438690810655046
- R@5: 0.9745318608004427
- Queries/second: 6052.33538824659
Results per dataset:
```
"BoolQ": {
"accuracy": 0.998776758409786,
"mrr": 0.999388379204893,
"r@3": 1.0,
"r@5": 1.0,
"query_per_second": 6978.947907596168,
"precision": 0.8649364406779662,
"recall": 0.998776758409786,
"f1": 0.9270508089696281
},
"CommonSenseQA": {
"accuracy": 0.9247135842880524,
"mrr": 0.9476358338878795,
"r@3": 0.9705400981996727,
"r@5": 0.9705400981996727,
"query_per_second": 5823.984138936813,
"precision": 0.442443226311668,
"recall": 0.9247135842880524,
"f1": 0.5985169491525425
},
"DROP": {
"accuracy": 0.9075083892617449,
"mrr": 0.9378200367399193,
"r@3": 0.9609899328859061,
"r@5": 0.9786073825503355,
"query_per_second": 6440.988897129248,
"precision": 0.8636726546906187,
"recall": 0.9075083892617449,
"f1": 0.8850480670893842
},
"DuoRC": {
"accuracy": 0.5555803405457654,
"mrr": 0.7368963429107307,
"r@3": 0.9092125808610305,
"r@5": 0.9596996059186557,
"query_per_second": 6853.643198794893,
"precision": 0.646814404432133,
"recall": 0.5555803405457654,
"f1": 0.5977360905563778
},
"HellaSWAG": {
"accuracy": 0.998406691894045,
"mrr": 0.9990705702715262,
"r@3": 1.0,
"r@5": 1.0,
"query_per_second": 3091.5012960785157,
"precision": 0.9974134500596896,
"recall": 0.998406691894045,
"f1": 0.9979098238280083
},
"HotpotQA": {
"accuracy": 0.7414435784479837,
"mrr": 0.8435804344945315,
"r@3": 0.9325652321247034,
"r@5": 0.973568281938326,
"query_per_second": 4972.668019223381,
"precision": 0.7352150537634409,
"recall": 0.7414435784479837,
"f1": 0.7383161801923401
},
"HybridQA": {
"accuracy": 0.7934218118869013,
"mrr": 0.8806947764680021,
"r@3": 0.964800923254472,
"r@5": 0.9930755914598961,
"query_per_second": 4886.494046259562,
"precision": 0.7198952879581152,
"recall": 0.7934218118869013,
"f1": 0.7548723579467472
},
"NarrativeQA": {
"accuracy": 0.5623756749076442,
"mrr": 0.7416681781060867,
"r@3": 0.9011082693947144,
"r@5": 0.9580373212086767,
"query_per_second": 7081.067049796865,
"precision": 0.5623224095472628,
"recall": 0.5623756749076442,
"f1": 0.5623490409661377
},
"NaturalQuestionsShort": {
"accuracy": 0.7985353692739171,
"mrr": 0.8743599435345307,
"r@3": 0.9439077594266126,
"r@5": 0.9774072919912745,
"query_per_second": 7136.590426649795,
"precision": 0.7963020509633313,
"recall": 0.7985353692739171,
"f1": 0.7974171464135678
},
"NewsQA": {
"accuracy": 0.5375118708452041,
"mrr": 0.71192075967717,
"r@3": 0.855650522317189,
"r@5": 0.939696106362773,
"query_per_second": 7193.851409052092,
"precision": 0.18757249378624688,
"recall": 0.5375118708452041,
"f1": 0.2780985136961061
},
"QAMR": {
"accuracy": 0.6658497602557272,
"mrr": 0.7969741223377345,
"r@3": 0.9207778369738945,
"r@5": 0.973361747469366,
"query_per_second": 7321.775044800525,
"precision": 0.8654525309881587,
"recall": 0.6658497602557272,
"f1": 0.7526421968624852
},
"RACE": {
"accuracy": 0.8771538617474154,
"mrr": 0.917901778042666,
"r@3": 0.9489154672613015,
"r@5": 0.9693898236367322,
"query_per_second": 6952.225120744351,
"precision": 0.8767983789260385,
"recall": 0.8771538617474154,
"f1": 0.8769760843129306
},
"SearchQA": {
"accuracy": 0.9762073027090695,
"mrr": 0.9865069592101393,
"r@3": 0.9972909305064782,
"r@5": 0.9984687868080094,
"query_per_second": 4031.0193826035634,
"precision": 0.9870191735143503,
"recall": 0.9762073027090695,
"f1": 0.9815834665719192
},
"SIQA": {
"accuracy": 0.9969293756397134,
"mrr": 0.9977823268509042,
"r@3": 0.9979529170931423,
"r@5": 1.0,
"query_per_second": 6711.547709005977,
"precision": 0.9329501915708812,
"recall": 0.9969293756397134,
"f1": 0.9638792676892627
},
"SQuAD": {
"accuracy": 0.550628092881614,
"mrr": 0.7164538452390565,
"r@3": 0.8660068519223448,
"r@5": 0.9366197183098591,
"query_per_second": 7033.420124363291,
"precision": 0.48613678373382624,
"recall": 0.550628092881614,
"f1": 0.5163766175814368
},
"TriviaQA-web": {
"accuracy": 0.7855124582584125,
"mrr": 0.8647404868442627,
"r@3": 0.9321859748266119,
"r@5": 0.9640380169535063,
"query_per_second": 4327.642440910395,
"precision": 0.7404358353510896,
"recall": 0.7855124582584125,
"f1": 0.7623083634550667
},
``` |