pipeline_tag | library_name | text | metadata | id | last_modified | tags | sha | created_at |
---|---|---|---|---|---|---|---|---|
text-classification | transformers | {} | Jeevesh8/multiberts_seed_9_ft_2 | null | [
"transformers",
"jax",
"tensorboard",
"bert",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
text-classification | transformers | {} | Jeevesh8/multiberts_seed_9_ft_3 | null | [
"transformers",
"jax",
"tensorboard",
"bert",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
text-classification | transformers | {} | Jeevesh8/multiberts_seed_9_ft_4 | null | [
"transformers",
"jax",
"tensorboard",
"bert",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
fill-mask | transformers | {} | Jeevesh8/sMLM-256-LF | null | [
"transformers",
"pytorch",
"longformer",
"fill-mask",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
fill-mask | transformers | {} | Jeevesh8/sMLM-LF | null | [
"transformers",
"pytorch",
"longformer",
"fill-mask",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
fill-mask | transformers | {} | Jeevesh8/sMLM-RoBERTa | null | [
"transformers",
"pytorch",
"roberta",
"fill-mask",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
fill-mask | transformers | {} | Jeevesh8/sMLM-bert | null | [
"transformers",
"pytorch",
"bert",
"fill-mask",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | Jeevesh8/test | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | JeffZl/DialoGPT-small-harrypotter | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
text-generation | transformers | {"tags": ["conversational"]} | Jeffrey/DialoGPT-small-Jeffrey | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"conversational",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | Jeffy/DialoGPT-small-spongebob | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | Jennie/Jennie | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | Jeremie24/JEREMIE | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | JeremyS/distilbert-base-uncased-finetuned-cola | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | Jerr/model_name | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | Jerry/bert-analysis | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
text-generation | transformers | {} | JerryQu/v2-distilgpt2 | null | [
"transformers",
"pytorch",
"jax",
"gpt2",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | JerukPurut/DialoGPT-small-natwithaheart | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | Jeska/BERTJEforTextClassificationVaccinChat | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
fill-mask | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# BertjeWDialData
This model is a fine-tuned version of [GroNLP/bert-base-dutch-cased](https://huggingface.co/GroNLP/bert-base-dutch-cased) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 2.2608
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1.0
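For reference, the `total_train_batch_size` above is not an independent setting: it follows from the per-step batch size and the accumulation steps. A minimal sketch of the relationship (assuming a single device, since no device count is reported):
```python
# total_train_batch_size is the per-step batch size times the number
# of gradient-accumulation steps (single device assumed; none is reported).
train_batch_size = 16
gradient_accumulation_steps = 4

total_train_batch_size = train_batch_size * gradient_accumulation_steps
assert total_train_batch_size == 64  # matches the value listed above
```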
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| No log | 1.0 | 297 | 2.2419 |
### Framework versions
- Transformers 4.13.0.dev0
- Pytorch 1.10.0+cu111
- Datasets 1.15.1
- Tokenizers 0.10.3
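The card itself includes no usage snippet; a minimal sketch of querying this checkpoint with the `transformers` fill-mask pipeline (the Dutch example sentence is illustrative, not from the card):
```python
from transformers import pipeline

# Load the fine-tuned Dutch masked-language model from the Hub.
unmasker = pipeline("fill-mask", model="Jeska/BertjeWDialData")

# BERTje-style models use the standard [MASK] token; the sentence
# ("I have a [MASK] tomorrow") is an illustrative example.
for prediction in unmasker("Ik heb morgen een [MASK]."):
    print(prediction["token_str"], round(prediction["score"], 3))
```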
| {"tags": ["generated_from_trainer"], "model-index": [{"name": "BertjeWDialData", "results": []}]} | Jeska/BertjeWDialData | null | [
"transformers",
"pytorch",
"tensorboard",
"bert",
"fill-mask",
"generated_from_trainer",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
fill-mask | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# BertjeWDialDataALL
This model is a fine-tuned version of [GroNLP/bert-base-dutch-cased](https://huggingface.co/GroNLP/bert-base-dutch-cased) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 1.9469
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 8.0
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 2.1739 | 1.0 | 1542 | 2.0150 |
| 2.0759 | 2.0 | 3084 | 1.9918 |
| 2.0453 | 3.0 | 4626 | 2.0132 |
| 1.9936 | 4.0 | 6168 | 1.9341 |
| 1.9659 | 5.0 | 7710 | 1.9140 |
| 1.9545 | 6.0 | 9252 | 1.9418 |
| 1.9104 | 7.0 | 10794 | 1.9179 |
| 1.8991 | 8.0 | 12336 | 1.9157 |
### Framework versions
- Transformers 4.13.0.dev0
- Pytorch 1.10.0
- Datasets 1.16.1
- Tokenizers 0.10.3
| {"tags": ["generated_from_trainer"], "model-index": [{"name": "BertjeWDialDataALL", "results": []}]} | Jeska/BertjeWDialDataALL | null | [
"transformers",
"pytorch",
"tensorboard",
"bert",
"fill-mask",
"generated_from_trainer",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
fill-mask | transformers | {} | Jeska/BertjeWDialDataALL02 | null | [
"transformers",
"pytorch",
"tensorboard",
"bert",
"fill-mask",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
fill-mask | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# BertjeWDialDataALL03
This model is a fine-tuned version of [GroNLP/bert-base-dutch-cased](https://huggingface.co/GroNLP/bert-base-dutch-cased) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 1.9459
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1000
- num_epochs: 8.0
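Unlike the BertjeWDialDataALL run above, this one adds 1000 warmup steps. A minimal sketch of the equivalent schedule, assuming the Trainer's usual `get_linear_schedule_with_warmup` helper (the optimizer setup is illustrative; the Trainer reports "Adam" but uses the AdamW implementation):
```python
import torch
from transformers import AutoModelForMaskedLM, get_linear_schedule_with_warmup

model = AutoModelForMaskedLM.from_pretrained("GroNLP/bert-base-dutch-cased")
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5, eps=1e-8)

# Linear warmup over the first 1000 steps, then linear decay to zero
# across the remaining steps (12336 total, per the results table below).
scheduler = get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps=1000, num_training_steps=12336
)
```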
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 2.1951 | 1.0 | 1542 | 2.0285 |
| 2.0918 | 2.0 | 3084 | 1.9989 |
| 2.0562 | 3.0 | 4626 | 2.0162 |
| 2.0012 | 4.0 | 6168 | 1.9330 |
| 1.9705 | 5.0 | 7710 | 1.9151 |
| 1.9571 | 6.0 | 9252 | 1.9419 |
| 1.9113 | 7.0 | 10794 | 1.9175 |
| 1.8988 | 8.0 | 12336 | 1.9143 |
### Framework versions
- Transformers 4.13.0.dev0
- Pytorch 1.10.0
- Datasets 1.16.1
- Tokenizers 0.10.3
| {"tags": ["generated_from_trainer"], "model-index": [{"name": "BertjeWDialDataALL03", "results": []}]} | Jeska/BertjeWDialDataALL03 | null | [
"transformers",
"pytorch",
"tensorboard",
"bert",
"fill-mask",
"generated_from_trainer",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
fill-mask | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# BertjeWDialDataALL04
This model is a fine-tuned version of [GroNLP/bert-base-dutch-cased](https://huggingface.co/GroNLP/bert-base-dutch-cased) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 1.9717
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 8.0
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 2.2954 | 1.0 | 1542 | 2.0372 |
| 2.2015 | 2.0 | 3084 | 2.0104 |
| 2.1661 | 3.0 | 4626 | 2.0372 |
| 2.1186 | 4.0 | 6168 | 1.9549 |
| 2.0939 | 5.0 | 7710 | 1.9438 |
| 2.0867 | 6.0 | 9252 | 1.9648 |
| 2.0462 | 7.0 | 10794 | 1.9465 |
| 2.0315 | 8.0 | 12336 | 1.9412 |
### Framework versions
- Transformers 4.13.0.dev0
- Pytorch 1.10.0
- Datasets 1.16.1
- Tokenizers 0.10.3
| {"tags": ["generated_from_trainer"], "model-index": [{"name": "BertjeWDialDataALL04", "results": []}]} | Jeska/BertjeWDialDataALL04 | null | [
"transformers",
"pytorch",
"tensorboard",
"bert",
"fill-mask",
"generated_from_trainer",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
fill-mask | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# BertjeWDialDataALLQonly
This model is a fine-tuned version of [GroNLP/bert-base-dutch-cased](https://huggingface.co/GroNLP/bert-base-dutch-cased) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 1.9438
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 15.0
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 2.2122 | 1.0 | 871 | 2.0469 |
| 2.0961 | 2.0 | 1742 | 2.0117 |
| 2.0628 | 3.0 | 2613 | 2.0040 |
| 2.0173 | 4.0 | 3484 | 1.9901 |
| 1.9772 | 5.0 | 4355 | 1.9711 |
| 1.9455 | 6.0 | 5226 | 1.9785 |
| 1.917 | 7.0 | 6097 | 1.9380 |
| 1.8933 | 8.0 | 6968 | 1.9651 |
| 1.8708 | 9.0 | 7839 | 1.9915 |
| 1.862 | 10.0 | 8710 | 1.9310 |
| 1.8545 | 11.0 | 9581 | 1.9422 |
| 1.8231 | 12.0 | 10452 | 1.9310 |
| 1.8141 | 13.0 | 11323 | 1.9362 |
| 1.7939 | 14.0 | 12194 | 1.9334 |
| 1.8035 | 15.0 | 13065 | 1.9197 |
### Framework versions
- Transformers 4.13.0.dev0
- Pytorch 1.10.0
- Datasets 1.16.1
- Tokenizers 0.10.3
| {"tags": ["generated_from_trainer"], "model-index": [{"name": "BertjeWDialDataALLQonly", "results": []}]} | Jeska/BertjeWDialDataALLQonly | null | [
"transformers",
"pytorch",
"tensorboard",
"bert",
"fill-mask",
"generated_from_trainer",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
fill-mask | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# BertjeWDialDataALLQonly02
This model is a fine-tuned version of [GroNLP/bert-base-dutch-cased](https://huggingface.co/GroNLP/bert-base-dutch-cased) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 1.9043
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 12.0
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 2.2438 | 1.0 | 871 | 2.1122 |
| 2.1235 | 2.0 | 1742 | 2.0784 |
| 2.0712 | 3.0 | 2613 | 2.0679 |
| 2.0034 | 4.0 | 3484 | 2.0546 |
| 1.9375 | 5.0 | 4355 | 2.0277 |
| 1.8911 | 6.0 | 5226 | 2.0364 |
| 1.8454 | 7.0 | 6097 | 1.9812 |
| 1.808 | 8.0 | 6968 | 2.0175 |
| 1.7716 | 9.0 | 7839 | 2.0286 |
| 1.7519 | 10.0 | 8710 | 1.9653 |
| 1.7358 | 11.0 | 9581 | 1.9817 |
| 1.7084 | 12.0 | 10452 | 1.9633 |
### Framework versions
- Transformers 4.13.0.dev0
- Pytorch 1.10.0
- Datasets 1.16.1
- Tokenizers 0.10.3
| {"tags": ["generated_from_trainer"], "model-index": [{"name": "BertjeWDialDataALLQonly02", "results": []}]} | Jeska/BertjeWDialDataALLQonly02 | null | [
"transformers",
"pytorch",
"tensorboard",
"bert",
"fill-mask",
"generated_from_trainer",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
fill-mask | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# BertjeWDialDataALLQonly03
This model is a fine-tuned version of [GroNLP/bert-base-dutch-cased](https://huggingface.co/GroNLP/bert-base-dutch-cased) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 1.9995
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 8
- total_train_batch_size: 128
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 24.0
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| No log | 1.0 | 435 | 2.0751 |
| 2.1982 | 2.0 | 870 | 2.0465 |
| 2.0841 | 3.0 | 1305 | 2.0420 |
| 2.0374 | 4.0 | 1740 | 2.0325 |
| 1.9731 | 5.0 | 2175 | 2.0075 |
| 1.9248 | 6.0 | 2610 | 2.0219 |
| 1.8848 | 7.0 | 3045 | 1.9770 |
| 1.8848 | 8.0 | 3480 | 2.0093 |
| 1.8419 | 9.0 | 3915 | 2.0298 |
| 1.804 | 10.0 | 4350 | 1.9681 |
| 1.7817 | 11.0 | 4785 | 1.9938 |
| 1.7472 | 12.0 | 5220 | 1.9654 |
| 1.7075 | 13.0 | 5655 | 1.9797 |
| 1.6976 | 14.0 | 6090 | 1.9691 |
| 1.6748 | 15.0 | 6525 | 1.9568 |
| 1.6748 | 16.0 | 6960 | 1.9618 |
| 1.6528 | 17.0 | 7395 | 1.9843 |
| 1.6335 | 18.0 | 7830 | 1.9265 |
| 1.6179 | 19.0 | 8265 | 1.9598 |
| 1.5992 | 20.0 | 8700 | 1.9331 |
| 1.583 | 21.0 | 9135 | 1.9795 |
| 1.5699 | 22.0 | 9570 | 2.0073 |
| 1.5703 | 23.0 | 10005 | 1.9308 |
| 1.5703 | 24.0 | 10440 | 1.9285 |
### Framework versions
- Transformers 4.13.0.dev0
- Pytorch 1.10.0
- Datasets 1.16.1
- Tokenizers 0.10.3
| {"tags": ["generated_from_trainer"], "model-index": [{"name": "BertjeWDialDataALLQonly03", "results": []}]} | Jeska/BertjeWDialDataALLQonly03 | null | [
"transformers",
"pytorch",
"tensorboard",
"bert",
"fill-mask",
"generated_from_trainer",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
fill-mask | transformers | {} | Jeska/BertjeWDialDataALLQonly04 | null | [
"transformers",
"pytorch",
"tensorboard",
"bert",
"fill-mask",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
fill-mask | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# BertjeWDialDataALLQonly05
This model is a fine-tuned version of [GroNLP/bert-base-dutch-cased](https://huggingface.co/GroNLP/bert-base-dutch-cased) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 2.3921
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0003
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 12.0
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 2.9349 | 1.0 | 871 | 2.9642 |
| 2.9261 | 2.0 | 1742 | 2.9243 |
| 2.8409 | 3.0 | 2613 | 2.8895 |
| 2.7308 | 4.0 | 3484 | 2.8394 |
| 2.6042 | 5.0 | 4355 | 2.7703 |
| 2.4671 | 6.0 | 5226 | 2.7522 |
| 2.3481 | 7.0 | 6097 | 2.6339 |
| 2.2493 | 8.0 | 6968 | 2.6224 |
| 2.1233 | 9.0 | 7839 | 2.5637 |
| 2.0194 | 10.0 | 8710 | 2.4896 |
| 1.9178 | 11.0 | 9581 | 2.4689 |
| 1.8588 | 12.0 | 10452 | 2.4663 |
### Framework versions
- Transformers 4.13.0.dev0
- Pytorch 1.10.0
- Datasets 1.16.1
- Tokenizers 0.10.3
| {"tags": ["generated_from_trainer"], "model-index": [{"name": "BertjeWDialDataALLQonly05", "results": []}]} | Jeska/BertjeWDialDataALLQonly05 | null | [
"transformers",
"pytorch",
"tensorboard",
"bert",
"fill-mask",
"generated_from_trainer",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
fill-mask | transformers | {} | Jeska/BertjeWDialDataALLQonly06 | null | [
"transformers",
"pytorch",
"tensorboard",
"bert",
"fill-mask",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
fill-mask | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# BertjeWDialDataALLQonly07
This model is a fine-tuned version of [GroNLP/bert-base-dutch-cased](https://huggingface.co/GroNLP/bert-base-dutch-cased) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 2.1135
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 18.0
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 2.3589 | 1.0 | 871 | 2.2805 |
| 2.2563 | 2.0 | 1742 | 2.2501 |
| 2.1936 | 3.0 | 2613 | 2.2419 |
| 2.11 | 4.0 | 3484 | 2.2301 |
| 2.0311 | 5.0 | 4355 | 2.2320 |
| 1.969 | 6.0 | 5226 | 2.2276 |
| 1.9148 | 7.0 | 6097 | 2.1621 |
| 1.8569 | 8.0 | 6968 | 2.1876 |
| 1.7978 | 9.0 | 7839 | 2.2011 |
| 1.7602 | 10.0 | 8710 | 2.1280 |
| 1.7166 | 11.0 | 9581 | 2.1644 |
| 1.6651 | 12.0 | 10452 | 2.1246 |
| 1.6141 | 13.0 | 11323 | 2.1264 |
| 1.5759 | 14.0 | 12194 | 2.1143 |
| 1.5478 | 15.0 | 13065 | 2.0982 |
| 1.5311 | 16.0 | 13936 | 2.0993 |
| 1.5187 | 17.0 | 14807 | 2.0979 |
| 1.4809 | 18.0 | 15678 | 2.0338 |
### Framework versions
- Transformers 4.13.0.dev0
- Pytorch 1.10.0
- Datasets 1.16.1
- Tokenizers 0.10.3
| {"tags": ["generated_from_trainer"], "model-index": [{"name": "BertjeWDialDataALLQonly07", "results": []}]} | Jeska/BertjeWDialDataALLQonly07 | null | [
"transformers",
"pytorch",
"tensorboard",
"bert",
"fill-mask",
"generated_from_trainer",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
fill-mask | transformers | {} | Jeska/BertjeWDialDataALLQonly08 | null | [
"transformers",
"pytorch",
"tensorboard",
"bert",
"fill-mask",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
fill-mask | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# BertjeWDialDataALLQonly09
This model is a fine-tuned version of [GroNLP/bert-base-dutch-cased](https://huggingface.co/GroNLP/bert-base-dutch-cased) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 1.9043
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 12.0
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 2.2439 | 1.0 | 871 | 2.1102 |
| 2.1235 | 2.0 | 1742 | 2.0785 |
| 2.0709 | 3.0 | 2613 | 2.0689 |
| 2.0033 | 4.0 | 3484 | 2.0565 |
| 1.9386 | 5.0 | 4355 | 2.0290 |
| 1.8909 | 6.0 | 5226 | 2.0366 |
| 1.8449 | 7.0 | 6097 | 1.9809 |
| 1.8078 | 8.0 | 6968 | 2.0177 |
| 1.7709 | 9.0 | 7839 | 2.0289 |
| 1.7516 | 10.0 | 8710 | 1.9645 |
| 1.7354 | 11.0 | 9581 | 1.9810 |
| 1.7073 | 12.0 | 10452 | 1.9631 |
### Framework versions
- Transformers 4.13.0.dev0
- Pytorch 1.10.0
- Datasets 1.16.1
- Tokenizers 0.10.3
| {"tags": ["generated_from_trainer"], "model-index": [{"name": "BertjeWDialDataALLQonly09", "results": []}]} | Jeska/BertjeWDialDataALLQonly09 | null | [
"transformers",
"pytorch",
"tensorboard",
"bert",
"fill-mask",
"generated_from_trainer",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
fill-mask | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# BertjeWDialDataALLQonly128
This model is a fine-tuned version of [GroNLP/bert-base-dutch-cased](https://huggingface.co/GroNLP/bert-base-dutch-cased) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 2.0364
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 12.0
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 2.2326 | 1.0 | 871 | 2.1509 |
| 2.1375 | 2.0 | 1742 | 2.1089 |
| 2.0442 | 3.0 | 2613 | 2.0655 |
| 2.0116 | 4.0 | 3484 | 2.0433 |
| 1.9346 | 5.0 | 4355 | 2.0134 |
| 1.9056 | 6.0 | 5226 | 1.9956 |
| 1.8295 | 7.0 | 6097 | 2.0287 |
| 1.8204 | 8.0 | 6968 | 2.0173 |
| 1.7928 | 9.0 | 7839 | 2.0251 |
| 1.7357 | 10.0 | 8710 | 2.0148 |
| 1.7318 | 11.0 | 9581 | 1.9274 |
| 1.7311 | 12.0 | 10452 | 1.9314 |
### Framework versions
- Transformers 4.13.0.dev0
- Pytorch 1.10.0
- Datasets 1.16.1
- Tokenizers 0.10.3
| {"tags": ["generated_from_trainer"], "model-index": [{"name": "BertjeWDialDataALLQonly128", "results": []}]} | Jeska/BertjeWDialDataALLQonly128 | null | [
"transformers",
"pytorch",
"tensorboard",
"bert",
"fill-mask",
"generated_from_trainer",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
fill-mask | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# BertjeWDialDataQA20k
This model is a fine-tuned version of [GroNLP/bert-base-dutch-cased](https://huggingface.co/GroNLP/bert-base-dutch-cased) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 1.9208
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 2.1713 | 1.0 | 1542 | 2.0098 |
| 2.0736 | 2.0 | 3084 | 1.9853 |
| 2.0543 | 3.0 | 4626 | 2.0134 |
### Framework versions
- Transformers 4.13.0.dev0
- Pytorch 1.10.0
- Datasets 1.16.1
- Tokenizers 0.10.3
| {"tags": ["generated_from_trainer"], "model-index": [{"name": "BertjeWDialDataQA20k", "results": []}]} | Jeska/BertjeWDialDataQA20k | null | [
"transformers",
"pytorch",
"tensorboard",
"bert",
"fill-mask",
"generated_from_trainer",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
text-classification | transformers | {} | Jeska/VaccinChatSentenceClassifierDutch | null | [
"transformers",
"pytorch",
"tensorboard",
"bert",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
text-classification | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# VaccinChatSentenceClassifierDutch_fromBERTje
This model is a fine-tuned version of [GroNLP/bert-base-dutch-cased](https://huggingface.co/GroNLP/bert-base-dutch-cased) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6223
- Accuracy: 0.9068
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 15.0
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:-----:|:---------------:|:--------:|
| 3.4666 | 1.0 | 1320 | 2.3355 | 0.5768 |
| 1.5293 | 2.0 | 2640 | 1.1118 | 0.8144 |
| 0.8031 | 3.0 | 3960 | 0.6362 | 0.8803 |
| 0.2985 | 4.0 | 5280 | 0.5119 | 0.8958 |
| 0.1284 | 5.0 | 6600 | 0.5023 | 0.8931 |
| 0.0842 | 6.0 | 7920 | 0.5246 | 0.9022 |
| 0.0414 | 7.0 | 9240 | 0.5581 | 0.9013 |
| 0.0372 | 8.0 | 10560 | 0.5721 | 0.9004 |
| 0.0292 | 9.0 | 11880 | 0.5469 | 0.9141 |
| 0.0257 | 10.0 | 13200 | 0.5871 | 0.9059 |
| 0.0189 | 11.0 | 14520 | 0.6181 | 0.9049 |
| 0.0104 | 12.0 | 15840 | 0.6184 | 0.9068 |
| 0.009 | 13.0 | 17160 | 0.6013 | 0.9049 |
| 0.0051 | 14.0 | 18480 | 0.6205 | 0.9059 |
| 0.0035 | 15.0 | 19800 | 0.6223 | 0.9068 |
### Framework versions
- Transformers 4.13.0.dev0
- Pytorch 1.10.0
- Datasets 1.16.1
- Tokenizers 0.10.3
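A minimal usage sketch with the `text-classification` pipeline (the Dutch example question is illustrative; the label set is whatever this classifier was fine-tuned with):
```python
from transformers import pipeline

# Load the fine-tuned Dutch sentence/intent classifier from the Hub.
classifier = pipeline(
    "text-classification",
    model="Jeska/VaccinChatSentenceClassifierDutch_fromBERTje",
)

# Illustrative vaccination-chat question ("Is the vaccine safe?").
print(classifier("Is het vaccin veilig?"))
```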
| {"tags": ["generated_from_trainer"], "metrics": ["accuracy"], "model-index": [{"name": "VaccinChatSentenceClassifierDutch_fromBERTje", "results": []}]} | Jeska/VaccinChatSentenceClassifierDutch_fromBERTje | null | [
"transformers",
"pytorch",
"tensorboard",
"bert",
"text-classification",
"generated_from_trainer",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
text-classification | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# VaccinChatSentenceClassifierDutch_fromBERTje2
This model is a fine-tuned version of [GroNLP/bert-base-dutch-cased](https://huggingface.co/GroNLP/bert-base-dutch-cased) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.5112
- Accuracy: 0.9004
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-06
- lr_scheduler_type: linear
- num_epochs: 15.0
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:-----:|:---------------:|:--------:|
| 4.1505 | 1.0 | 1320 | 3.3293 | 0.3793 |
| 2.7333 | 2.0 | 2640 | 2.2295 | 0.6133 |
| 2.0189 | 3.0 | 3960 | 1.5134 | 0.7587 |
| 1.2504 | 4.0 | 5280 | 1.0765 | 0.8282 |
| 0.7733 | 5.0 | 6600 | 0.7937 | 0.8629 |
| 0.5217 | 6.0 | 7920 | 0.6438 | 0.8784 |
| 0.3148 | 7.0 | 9240 | 0.5733 | 0.8857 |
| 0.2067 | 8.0 | 10560 | 0.5362 | 0.8912 |
| 0.1507 | 9.0 | 11880 | 0.5098 | 0.8903 |
| 0.1024 | 10.0 | 13200 | 0.5078 | 0.8976 |
| 0.0837 | 11.0 | 14520 | 0.5054 | 0.8967 |
| 0.0608 | 12.0 | 15840 | 0.5062 | 0.8958 |
| 0.0426 | 13.0 | 17160 | 0.5072 | 0.9013 |
| 0.0374 | 14.0 | 18480 | 0.5110 | 0.9040 |
| 0.0346 | 15.0 | 19800 | 0.5112 | 0.9004 |
### Framework versions
- Transformers 4.13.0.dev0
- Pytorch 1.10.0
- Datasets 1.16.1
- Tokenizers 0.10.3
| {"tags": ["generated_from_trainer"], "metrics": ["accuracy"], "model-index": [{"name": "VaccinChatSentenceClassifierDutch_fromBERTje2", "results": []}]} | Jeska/VaccinChatSentenceClassifierDutch_fromBERTje2 | null | [
"transformers",
"pytorch",
"tensorboard",
"bert",
"text-classification",
"generated_from_trainer",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
text-classification | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# VaccinChatSentenceClassifierDutch_fromBERTje2_DAdialog
This model is a fine-tuned version of [outputDA/checkpoint-7710](https://huggingface.co/outputDA/checkpoint-7710) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.5025
- Accuracy: 0.9077
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-06
- lr_scheduler_type: linear
- num_epochs: 15.0
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:-----:|:---------------:|:--------:|
| 3.9925 | 1.0 | 1320 | 3.0954 | 0.4223 |
| 2.5041 | 2.0 | 2640 | 1.9762 | 0.6563 |
| 1.8061 | 3.0 | 3960 | 1.3196 | 0.7952 |
| 1.0694 | 4.0 | 5280 | 0.9304 | 0.8510 |
| 0.6479 | 5.0 | 6600 | 0.6875 | 0.8821 |
| 0.4408 | 6.0 | 7920 | 0.5692 | 0.8976 |
| 0.2542 | 7.0 | 9240 | 0.5291 | 0.8949 |
| 0.1709 | 8.0 | 10560 | 0.5038 | 0.9059 |
| 0.1181 | 9.0 | 11880 | 0.4885 | 0.9049 |
| 0.0878 | 10.0 | 13200 | 0.4900 | 0.9049 |
| 0.0702 | 11.0 | 14520 | 0.4930 | 0.9086 |
| 0.0528 | 12.0 | 15840 | 0.4987 | 0.9113 |
| 0.0406 | 13.0 | 17160 | 0.5009 | 0.9113 |
| 0.0321 | 14.0 | 18480 | 0.5017 | 0.9104 |
| 0.0308 | 15.0 | 19800 | 0.5025 | 0.9077 |
### Framework versions
- Transformers 4.13.0.dev0
- Pytorch 1.10.0
- Datasets 1.16.1
- Tokenizers 0.10.3
| {"tags": ["generated_from_trainer"], "metrics": ["accuracy"], "model-index": [{"name": "VaccinChatSentenceClassifierDutch_fromBERTje2_DAdialog", "results": []}]} | Jeska/VaccinChatSentenceClassifierDutch_fromBERTje2_DAdialog | null | [
"transformers",
"pytorch",
"tensorboard",
"bert",
"text-classification",
"generated_from_trainer",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
text-classification | transformers | {} | Jeska/VaccinChatSentenceClassifierDutch_fromBERTje2_DAdialog02 | null | [
"transformers",
"pytorch",
"tensorboard",
"bert",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
text-classification | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# VaccinChatSentenceClassifierDutch_fromBERTje2_DAdialogQonly
This model is a fine-tuned version of [outputDAQonly/checkpoint-8710](https://huggingface.co/outputDAQonly/checkpoint-8710) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.5008
- Accuracy: 0.9068
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-06
- lr_scheduler_type: linear
- num_epochs: 15.0
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:-----:|:---------------:|:--------:|
| 4.0751 | 1.0 | 1320 | 3.1674 | 0.4086 |
| 2.5619 | 2.0 | 2640 | 2.0335 | 0.6426 |
| 1.8549 | 3.0 | 3960 | 1.3537 | 0.7861 |
| 1.106 | 4.0 | 5280 | 0.9515 | 0.8519 |
| 0.6698 | 5.0 | 6600 | 0.7152 | 0.8757 |
| 0.4497 | 6.0 | 7920 | 0.5838 | 0.8921 |
| 0.2626 | 7.0 | 9240 | 0.5300 | 0.8940 |
| 0.1762 | 8.0 | 10560 | 0.4984 | 0.8958 |
| 0.119 | 9.0 | 11880 | 0.4906 | 0.9059 |
| 0.0919 | 10.0 | 13200 | 0.4896 | 0.8995 |
| 0.0722 | 11.0 | 14520 | 0.5012 | 0.9022 |
| 0.0517 | 12.0 | 15840 | 0.4951 | 0.9040 |
| 0.0353 | 13.0 | 17160 | 0.4988 | 0.9040 |
| 0.0334 | 14.0 | 18480 | 0.5035 | 0.9049 |
| 0.0304 | 15.0 | 19800 | 0.5008 | 0.9068 |
### Framework versions
- Transformers 4.13.0.dev0
- Pytorch 1.10.0
- Datasets 1.16.1
- Tokenizers 0.10.3
| {"tags": ["generated_from_trainer"], "metrics": ["accuracy"], "model-index": [{"name": "VaccinChatSentenceClassifierDutch_fromBERTje2_DAdialogQonly", "results": []}]} | Jeska/VaccinChatSentenceClassifierDutch_fromBERTje2_DAdialogQonly | null | [
"transformers",
"pytorch",
"tensorboard",
"bert",
"text-classification",
"generated_from_trainer",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
text-classification | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# VaccinChatSentenceClassifierDutch_fromBERTje2_DAdialogQonly09
This model is a fine-tuned version of [outputDAQonly09/](https://huggingface.co/outputDAQonly09/) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4978
- Accuracy: 0.9031
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 32
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 30.0
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| No log | 1.0 | 330 | 3.9692 | 0.2249 |
| 4.3672 | 2.0 | 660 | 3.1312 | 0.4031 |
| 4.3672 | 3.0 | 990 | 2.5068 | 0.5658 |
| 3.1495 | 4.0 | 1320 | 2.0300 | 0.6600 |
| 2.2491 | 5.0 | 1650 | 1.6517 | 0.7450 |
| 2.2491 | 6.0 | 1980 | 1.3604 | 0.7943 |
| 1.622 | 7.0 | 2310 | 1.1328 | 0.8327 |
| 1.1252 | 8.0 | 2640 | 0.9484 | 0.8611 |
| 1.1252 | 9.0 | 2970 | 0.8212 | 0.8757 |
| 0.7969 | 10.0 | 3300 | 0.7243 | 0.8830 |
| 0.5348 | 11.0 | 3630 | 0.6597 | 0.8867 |
| 0.5348 | 12.0 | 3960 | 0.5983 | 0.8857 |
| 0.3744 | 13.0 | 4290 | 0.5635 | 0.8976 |
| 0.2564 | 14.0 | 4620 | 0.5437 | 0.8985 |
| 0.2564 | 15.0 | 4950 | 0.5124 | 0.9013 |
| 0.1862 | 16.0 | 5280 | 0.5074 | 0.9022 |
| 0.1349 | 17.0 | 5610 | 0.5028 | 0.9049 |
| 0.1349 | 18.0 | 5940 | 0.4876 | 0.9077 |
| 0.0979 | 19.0 | 6270 | 0.4971 | 0.9049 |
| 0.0763 | 20.0 | 6600 | 0.4941 | 0.9022 |
| 0.0763 | 21.0 | 6930 | 0.4957 | 0.9049 |
| 0.0602 | 22.0 | 7260 | 0.4989 | 0.9049 |
| 0.0504 | 23.0 | 7590 | 0.4959 | 0.9040 |
| 0.0504 | 24.0 | 7920 | 0.4944 | 0.9031 |
| 0.0422 | 25.0 | 8250 | 0.4985 | 0.9040 |
| 0.0379 | 26.0 | 8580 | 0.4970 | 0.9049 |
| 0.0379 | 27.0 | 8910 | 0.4949 | 0.9040 |
| 0.0351 | 28.0 | 9240 | 0.4971 | 0.9040 |
| 0.0321 | 29.0 | 9570 | 0.4967 | 0.9031 |
| 0.0321 | 30.0 | 9900 | 0.4978 | 0.9031 |
### Framework versions
- Transformers 4.13.0.dev0
- Pytorch 1.10.0
- Datasets 1.16.1
- Tokenizers 0.10.3
| {"tags": ["generated_from_trainer"], "metrics": ["accuracy"], "model-index": [{"name": "VaccinChatSentenceClassifierDutch_fromBERTje2_DAdialogQonly09", "results": []}]} | Jeska/VaccinChatSentenceClassifierDutch_fromBERTje2_DAdialogQonly09 | null | [
"transformers",
"pytorch",
"tensorboard",
"bert",
"text-classification",
"generated_from_trainer",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
text-classification | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# VaccinChatSentenceClassifierDutch_fromBERTjeDIAL
This model is a fine-tuned version of [Jeska/BertjeWDialDataQA20k](https://huggingface.co/Jeska/BertjeWDialDataQA20k) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 1.8355
- Accuracy: 0.6322
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 3.4418 | 1.0 | 1457 | 2.3866 | 0.5406 |
| 1.7742 | 2.0 | 2914 | 1.9365 | 0.6069 |
| 1.1313 | 3.0 | 4371 | 1.8355 | 0.6322 |
### Framework versions
- Transformers 4.13.0.dev0
- Pytorch 1.10.0
- Datasets 1.16.1
- Tokenizers 0.10.3
| {"tags": ["generated_from_trainer"], "metrics": ["accuracy"], "model-index": [{"name": "VaccinChatSentenceClassifierDutch_fromBERTjeDIAL", "results": []}]} | Jeska/VaccinChatSentenceClassifierDutch_fromBERTjeDIAL | null | [
"transformers",
"pytorch",
"tensorboard",
"bert",
"text-classification",
"generated_from_trainer",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
null | null | {} | Jeska/XLM-RoBERTaWDialDataALL01 | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
text-classification | transformers |
# Model Trained Using AutoNLP
- Problem type: Multi-class Classification
- Model ID: 22144706
- CO2 Emissions (in grams): 27.135492487925884
## Validation Metrics
- Loss: 1.81697416305542
- Accuracy: 0.6377269139700079
- Macro F1: 0.5181293370145044
- Micro F1: 0.6377269139700079
- Weighted F1: 0.631117826235572
- Macro Precision: 0.5371452512845428
- Micro Precision: 0.6377269139700079
- Weighted Precision: 0.6655055695465463
- Macro Recall: 0.5609328178925124
- Micro Recall: 0.6377269139700079
- Weighted Recall: 0.6377269139700079
## Usage
You can use cURL to access this model:
```
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoNLP"}' https://api-inference.huggingface.co/models/Jeska/autonlp-vaccinfaq-22144706
```
Or Python API:
```
from transformers import AutoModelForSequenceClassification, AutoTokenizer
model = AutoModelForSequenceClassification.from_pretrained("Jeska/autonlp-vaccinfaq-22144706", use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained("Jeska/autonlp-vaccinfaq-22144706", use_auth_token=True)
inputs = tokenizer("I love AutoNLP", return_tensors="pt")
outputs = model(**inputs)
``` | {"language": "unk", "tags": "autonlp", "datasets": ["Jeska/autonlp-data-vaccinfaq"], "widget": [{"text": "I love AutoNLP \ud83e\udd17"}], "co2_eq_emissions": 27.135492487925884} | Jeska/autonlp-vaccinfaq-22144706 | null | [
"transformers",
"pytorch",
"bert",
"text-classification",
"autonlp",
"unk",
"dataset:Jeska/autonlp-data-vaccinfaq",
"co2_eq_emissions",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
null | null | {} | JesseParvess/fine-tune-wav2vec2 | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | JhunMarson/JhunMarson | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | `LOREN` is an interpretable fact verification model trained on [FEVER](https://fever.ai), which aims to predict the veracity of a textual claim against a trustworthy knowledge source such as Wikipedia.
`LOREN` also decomposes the verification and makes accurate and faithful phrase-level veracity predictions without any phrasal veracity supervision.
This repo hosts the following pre-trained models for `LOREN`:
- `fact_checking/`: the verification models based on BERT (large) and RoBERTa (large), respectively.
- `mrc_seq2seq/`: the generative machine reading comprehension model based on BART (base).
- `evidence_retrieval/`: the evidence sentence ranking models, which are copied directly from [KGAT](https://github.com/thunlp/KernelGAT).
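Since the repo is a collection of checkpoints rather than a single `transformers` model, a plausible way to fetch everything is `huggingface_hub`'s `snapshot_download`; a minimal sketch (directory names follow the listing above):
```python
from huggingface_hub import snapshot_download

# Download the full LOREN checkpoint collection; the returned local
# path contains fact_checking/, mrc_seq2seq/ and evidence_retrieval/.
local_dir = snapshot_download(repo_id="jiangjiechen/loren")
print(local_dir)
```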
More technical details can be found at [this GitHub Repo](https://github.com/jiangjiechen/LOREN).
Please check out our AAAI 2022 paper for more details: "[LOREN: Logic-Regularized Reasoning for Interpretable Fact Verification](https://arxiv.org/abs/2012.13577)". | {} | jiangjiechen/loren | null | [
"arxiv:2012.13577",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
null | null | {} | Jiayao/distilbert-base-uncased-finetuned-squad | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | Jibans/Bob_Marley | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | Jiejie/asr | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | JihyukKim/cbert-aleqd-s100-b36-g2-ib-hn | null | [
"pytorch",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | JihyukKim/cbert-b36-g2-ib-hn | null | [
"pytorch",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | Jihyun22/bert-base-finetuned-ner | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
text-classification | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# bert-base-finetuned-nli
This model is a fine-tuned version of [klue/bert-base](https://huggingface.co/klue/bert-base) on the klue dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1357
- Accuracy: 0.756
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 128
- eval_batch_size: 128
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| No log | 1.0 | 196 | 0.7357 | 0.156 |
| No log | 2.0 | 392 | 0.5952 | 0.0993 |
| 0.543 | 3.0 | 588 | 0.5630 | 0.099 |
| 0.543 | 4.0 | 784 | 0.5670 | 0.079 |
| 0.543 | 5.0 | 980 | 0.5795 | 0.078 |
### Framework versions
- Transformers 4.9.2
- Pytorch 1.9.0+cu102
- Datasets 1.11.0
- Tokenizers 0.10.3
| {"tags": ["generated_from_trainer"], "datasets": ["klue"], "metrics": ["accuracy"], "model_index": [{"name": "bert-base-finetuned-nli", "results": [{"task": {"name": "Text Classification", "type": "text-classification"}, "dataset": {"name": "klue", "type": "klue", "args": "nli"}, "metric": {"name": "Accuracy", "type": "accuracy", "value": 0.756}}]}]} | Jihyun22/bert-base-finetuned-nli | null | [
"transformers",
"pytorch",
"bert",
"text-classification",
"generated_from_trainer",
"dataset:klue",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
null | null | {} | Jihyun22/roberta-base-finetuned-nli | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | JihyunLEE/bert-base-uncased-finetuned-swag | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | LysandreJik/Rose-Brain | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | LysandreJik/dummy-hf-hub | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
fill-mask | transformers | {} | LysandreJik/dummy-model | null | [
"transformers",
"pytorch",
"camembert",
"fill-mask",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | LysandreJik/fat-pushes | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | LysandreJik/flax-model | null | [
"jax",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | LysandreJik/layoutlmv2-base-uncased | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
feature-extraction | transformers | {} | LysandreJik/local_dir | null | [
"transformers",
"pytorch",
"bert",
"feature-extraction",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | LysandreJik/local_dir2 | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | LysandreJik/local_dir3 | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
feature-extraction | transformers | {} | LysandreJik/local_dir_1 | null | [
"transformers",
"pytorch",
"bert",
"feature-extraction",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | LysandreJik/metnet-test | null | [
"pytorch",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | LysandreJik/model | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | LysandreJik/new-repo | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | LysandreJik/random-model | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | LysandreJik/repo-with-large-files | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
text-classification | transformers | {} | LysandreJik/test-upload | null | [
"transformers",
"pytorch",
"distilbert",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
text-classification | transformers | {} | LysandreJik/test-upload1 | null | [
"transformers",
"pytorch",
"distilbert",
"text-classification",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
text-classification | transformers |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# testing
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE MRPC dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6644
- Accuracy: 0.6814
- F1: 0.8105
- Combined Score: 0.7459
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 1
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- training_steps: 10
### Training results
### Framework versions
- Transformers 4.11.0.dev0
- Pytorch 1.9.0+cu111
- Datasets 1.11.0
- Tokenizers 0.10.3
| {"language": ["en"], "license": "apache-2.0", "tags": ["generated_from_trainer"], "datasets": ["glue"], "metrics": ["accuracy", "f1"], "model-index": [{"name": "testing", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "GLUE MRPC", "type": "glue", "args": "mrpc"}, "metrics": [{"type": "accuracy", "value": 0.6813725490196079, "name": "Accuracy"}, {"type": "f1", "value": 0.8104956268221574, "name": "F1"}]}]}]} | LysandreJik/testing | null | [
"transformers",
"pytorch",
"tensorboard",
"distilbert",
"text-classification",
"generated_from_trainer",
"en",
"dataset:glue",
"license:apache-2.0",
"model-index",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
null | null | {} | LysandreJik/text-files-2 | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | LysandreJik/text-files | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
feature-extraction | transformers | {} | LysandreJik/torch-model-2 | null | [
"transformers",
"pytorch",
"bert",
"feature-extraction",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | LysandreJik/torch-model | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | LysandreJik/trocr-large | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | LysandreJik/with-commit-1 | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
text-generation | transformers |
# Jimmy's character DialoGPT model | {"tags": ["conversational"]} | JimmyHodl/DialoGPT-medium | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"conversational",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
null | null | {} | JimmyHodl/Model | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | {} | Jinchao/wav2vec2-large-xls-r-300m-turkish-colab | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | transformers |
# KrELECTRA-base-mecab
Korean-based Pre-trained ELECTRA Language Model using Mecab (Morphological Analyzer)
## Usage
### Load model and tokenizer
```python
>>> from transformers import AutoTokenizer, AutoModelForPreTraining
>>> model = AutoModelForPreTraining.from_pretrained("Jinhwan/krelectra-base-mecab")
>>> tokenizer = AutoTokenizer.from_pretrained("Jinhwan/krelectra-base-mecab")
```
### Tokenizer example
```python
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("Jinhwan/krelectra-base-mecab")
>>> tokenizer.tokenize("[CLS] 한국어 ELECTRA를 공유합니다. [SEP]")
['[CLS]', '한국어', 'EL', '##ECT', '##RA', '##를', '공유', '##합', '##니다', '.', '[SEP]']
>>> tokenizer.convert_tokens_to_ids(['[CLS]', '한국어', 'EL', '##ECT', '##RA', '##를', '공유', '##합', '##니다', '.', '[SEP]'])
[2, 7214, 24023, 24663, 26580, 3195, 7086, 3746, 5500, 17, 3]
```
| {"language": "ko", "license": "apache-2.0", "tags": ["korean"]} | Jinhwan/krelectra-base-mecab | null | [
"transformers",
"pytorch",
"electra",
"pretraining",
"korean",
"ko",
"license:apache-2.0",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
null | null | {} | Jinx18/Nastya19 | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
text-generation | transformers | {} | Jipski/Flos_gpt-2_erw-02 | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"has_space",
"text-generation-inference",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
text-generation | transformers | {} | Jipski/Flos_gpt-2_erw | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"has_space",
"text-generation-inference",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
text-generation | transformers | {} | Jipski/MegStuart_gpt-2 | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"has_space",
"text-generation-inference",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
text-generation | transformers | {} | Jipski/gpt2-Flo-BasBoettcher-Chefkoch | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"has_space",
"text-generation-inference",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
text-generation | transformers | {} | Jipski/gpt2-Flo-BasBoettcher | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"has_space",
"text-generation-inference",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
text-generation | transformers | {} | Jipski/gpt2-FloSolo | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"autotrain_compatible",
"endpoints_compatible",
"has_space",
"text-generation-inference",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
null | null | for test | {"license": "afl-3.0"} | Jira/first_test | null | [
"license:afl-3.0",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
null | null | {} | JirroReo/DialoGPT-small-rick | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
feature-extraction | transformers | {} | indexxlim/HanBART_base | null | [
"transformers",
"pytorch",
"bart",
"feature-extraction",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
fill-mask | transformers | {} | Jitin/manglish | null | [
"transformers",
"pytorch",
"jax",
"roberta",
"fill-mask",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
fill-mask | transformers | {} | Jitin/romanized-malayalam | null | [
"transformers",
"pytorch",
"jax",
"roberta",
"fill-mask",
"autotrain_compatible",
"endpoints_compatible",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
|
zero-shot-classification | transformers |
# XLM-roBERTa-large-it-mnli
## Version 0.1
| | matched-it acc | mismatched-it acc |
| -------------------------------------------------------------------------------- |----------------|-------------------|
| XLM-roBERTa-large-it-mnli | 84.75 | 85.39 |
## Model Description
This model takes [xlm-roberta-large](https://huggingface.co/xlm-roberta-large) and fine-tunes it on a subset of NLI data taken from an automatically translated version of the MNLI corpus. It is intended to be used for zero-shot text classification, such as with the Hugging Face [ZeroShotClassificationPipeline](https://huggingface.co/transformers/master/main_classes/pipelines.html#transformers.ZeroShotClassificationPipeline).
## Intended Usage
This model is intended to be used for zero-shot text classification of Italian texts.
Since the base model was pre-trained on 100 different languages, it has also shown
some effectiveness on languages other than Italian. See the full list of
pre-trained languages in Appendix A of the
[XLM-RoBERTa paper](https://arxiv.org/abs/1911.02116).
For English-only classification, it is recommended to use
[bart-large-mnli](https://huggingface.co/facebook/bart-large-mnli) or
[a distilled bart MNLI model](https://huggingface.co/models?filter=pipeline_tag%3Azero-shot-classification&search=valhalla).
#### With the zero-shot classification pipeline
The model can be loaded with the `zero-shot-classification` pipeline like so:
```python
from transformers import pipeline
classifier = pipeline("zero-shot-classification",
model="Jiva/xlm-roberta-large-it-mnli", device=0, use_fast=True, multi_label=True)
```
You can then classify text in any of the supported languages. You can even pass the labels in one language and the sequence to
classify in another:
```python
# we will classify the following wikipedia entry about Sardinia"
sequence_to_classify = "La Sardegna è una regione italiana a statuto speciale di 1 592 730 abitanti con capoluogo Cagliari, la cui denominazione bilingue utilizzata nella comunicazione ufficiale è Regione Autonoma della Sardegna / Regione Autònoma de Sardigna."
# we can specify candidate labels in Italian:
candidate_labels = ["geografia", "politica", "macchine", "cibo", "moda"]
classifier(sequence_to_classify, candidate_labels)
# {'labels': ['geografia', 'moda', 'politica', 'macchine', 'cibo'],
# 'scores': [0.38871392607688904, 0.22633370757102966, 0.19398456811904907, 0.13735772669315338, 0.13708525896072388]}
```
The default hypothesis template is the English one, `This text is {}`. Better results are achieved with this model when providing a translated template:
```python
sequence_to_classify = "La Sardegna è una regione italiana a statuto speciale di 1 592 730 abitanti con capoluogo Cagliari, la cui denominazione bilingue utilizzata nella comunicazione ufficiale è Regione Autonoma della Sardegna / Regione Autònoma de Sardigna."
candidate_labels = ["geografia", "politica", "macchine", "cibo", "moda"]
hypothesis_template = "si parla di {}"
classifier(sequence_to_classify, candidate_labels, hypothesis_template=hypothesis_template)
# {'labels': [...], 'scores': [0.6068345904350281, 0.34715887904167175, 0.32433947920799255, 0.3068877160549164, 0.18744681775569916]}
```
#### With manual PyTorch
```python
# pose the sequence as an NLI premise and a label as the hypothesis
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

device = 'cuda' if torch.cuda.is_available() else 'cpu'
nli_model = AutoModelForSequenceClassification.from_pretrained('Jiva/xlm-roberta-large-it-mnli').to(device)
tokenizer = AutoTokenizer.from_pretrained('Jiva/xlm-roberta-large-it-mnli')

premise = sequence_to_classify  # the Sardinia passage from the examples above
label = 'geografia'
hypothesis = f'si parla di {label}.'

# run through the model fine-tuned on MNLI
x = tokenizer.encode(premise, hypothesis, return_tensors='pt',
                     truncation='only_first')
logits = nli_model(x.to(device))[0]

# we throw away "neutral" (dim 1) and take the probability of
# "entailment" (2) as the probability of the label being true
entail_contradiction_logits = logits[:, [0, 2]]
probs = entail_contradiction_logits.softmax(dim=1)
prob_label_is_true = probs[:, 1]
```
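Building on the block above (it reuses `nli_model`, `tokenizer`, `premise`, and `device`), a short sketch that scores every candidate label independently, mirroring the pipeline's multi-label behaviour:
```python
candidate_labels = ["geografia", "politica", "macchine", "cibo", "moda"]
scores = {}
for label in candidate_labels:
    x = tokenizer.encode(premise, f'si parla di {label}.', return_tensors='pt',
                         truncation='only_first')
    logits = nli_model(x.to(device))[0]
    probs = logits[:, [0, 2]].softmax(dim=1)  # [contradiction, entailment]
    scores[label] = probs[:, 1].item()        # entailment probability per label
```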
## Training
## Version 0.1
The model has now been retrained on the full training set. Around 1000 sentence pairs were removed from the set because their translation was botched by the translation model.
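For context, the corpus was produced by machine-translating MNLI into Italian with the Helsinki-NLP/opus-mt-en-it model described under Version 0.0 below. A minimal sketch of that translation step follows; the 120-token output cap is the one stated in this card, while the batching details are assumptions:
```python
from transformers import MarianMTModel, MarianTokenizer

mt_name = "Helsinki-NLP/opus-mt-en-it"
mt_tokenizer = MarianTokenizer.from_pretrained(mt_name)
mt_model = MarianMTModel.from_pretrained(mt_name)

def translate(sentences, max_length=120):  # max output sequence length used for the corpus
    batch = mt_tokenizer(sentences, return_tensors="pt", padding=True, truncation=True)
    out = mt_model.generate(**batch, max_length=max_length)
    return [mt_tokenizer.decode(t, skip_special_tokens=True) for t in out]

translate(["The cat sat on the mat."])
```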
| metric | value |
|----------------- |------- |
| learning_rate | 4e-6 |
| optimizer | AdamW |
| batch_size | 80 |
| mcc | 0.77 |
| train_loss | 0.34 |
| eval_loss | 0.40 |
| stopped_at_step | 9754 |
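For orientation, here is a minimal fine-tuning sketch matching the hyperparameters in the table above. The dataset file names, column names, and the use of `max_steps` for the stopping point are assumptions, not the exact training script; `Trainer` uses AdamW by default, as listed in the table:
```python
from datasets import load_dataset
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          Trainer, TrainingArguments)

model = AutoModelForSequenceClassification.from_pretrained("xlm-roberta-large", num_labels=3)
tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-large")

# hypothetical JSON files holding the Italian-translated MNLI pairs
ds = load_dataset("json", data_files={"train": "mnli_it_train.json",
                                      "validation": "mnli_it_val.json"})
ds = ds.map(lambda ex: tokenizer(ex["premise"], ex["hypothesis"], truncation=True),
            batched=True)

args = TrainingArguments(
    output_dir="xlm-roberta-large-it-mnli",
    learning_rate=4e-6,               # from the table above
    per_device_train_batch_size=80,   # from the table above
    evaluation_strategy="steps",
    max_steps=9754,                   # stopped_at_step from the table above
)
Trainer(model=model, args=args, train_dataset=ds["train"],
        eval_dataset=ds["validation"], tokenizer=tokenizer).train()
```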
## Version 0.0
This model was pre-trained on a set of 100 languages, as described in
[the original paper](https://arxiv.org/abs/1911.02116). It was then fine-tuned on the task of NLI on an Italian translation of the MNLI dataset (85% of the train set only so far). The model used for translating the texts is Helsinki-NLP/opus-mt-en-it, with a max output sequence lenght of 120. The model has been trained for 1 epoch with learning rate 4e-6 and batch size 80, currently it scores 82 acc. on the remaining 15% of the training. | {"language": "it", "license": "mit", "tags": ["text-classification", "pytorch", "tensorflow"], "datasets": ["multi_nli", "glue"], "pipeline_tag": "zero-shot-classification", "widget": [{"text": "La seconda guerra mondiale vide contrapporsi, tra il 1939 e il 1945, le cosiddette potenze dell'Asse e gli Alleati che, come gi\u00e0 accaduto ai belligeranti della prima guerra mondiale, si combatterono su gran parte del pianeta; il conflitto ebbe inizio il 1\u00ba settembre 1939 con l'attacco della Germania nazista alla Polonia e termin\u00f2, nel teatro europeo, l'8 maggio 1945 con la resa tedesca e, in quello asiatico, il successivo 2 settembre con la resa dell'Impero giapponese dopo i bombardamenti atomici di Hiroshima e Nagasaki.", "candidate_labels": "guerra, storia, moda, cibo", "multi_class": true}], "model-index": [{"name": "Jiva/xlm-roberta-large-it-mnli", "results": [{"task": {"type": "natural-language-inference", "name": "Natural Language Inference"}, "dataset": {"name": "glue", "type": "glue", "config": "mnli", "split": "validation_matched"}, "metrics": [{"type": "accuracy", "value": 0.8819154355578197, "name": "Accuracy", "verified": true, "verifyToken": "eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiNjY3MTgxNjg2ZGZmYjRjNmUyYWMwYzA3M2I3M2U0ZTYxZTFlNWY0Y2Y3MjZhYmVmM2U0OTZlYmJiMzU0MWRiMiIsInZlcnNpb24iOjF9.jgND_l7mc3EtHPiAPbAas7YaNnNZ5FSZNmIDOHSEpqV87lGL2XL4seol_MspagWmoQAN_RGdSM9nsIQH364EAw"}, {"type": "precision", "value": 0.8814638070461666, "name": "Precision Macro", "verified": true, "verifyToken": "eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiOGY0MjQ0ZDkyMzA3NmU2YmYzMGUyNTJmNWUxMTI4MTI5YzhiNjA2MzZiZDBmMTc4ODdhMzcxNTMyM2Y0MWIwOCIsInZlcnNpb24iOjF9.BCDxzHFaXZWISV2qkXimdnIxGT3qVos-tcBv3Yp9VntL2ot4e-Nifman-Yb4XwmHccTxBnf3TY0DxEE55vF9BQ"}, {"type": "precision", "value": 0.8819154355578197, "name": "Precision Micro", "verified": true, "verifyToken": "eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiZTlkZWIzNTBhNmFkNzkwNzg3ODcxNmU3YjgwODBmMmE5Njc3M2RmMDk0ZGFjZWYwMDBmNzVjOTQ3NGYyZjI3ZSIsInZlcnNpb24iOjF9.ejVcvVSUBWSMbvpxlkVi73qzkwNBgD5C1GBTandyWbk3bOas7fJ26x0duI6sNkgz-Y3Q_3pI-LJSCZgtPhP0Bw"}, {"type": "precision", "value": 0.881571663280083, "name": "Precision Weighted", "verified": true, "verifyToken": "eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiNDFkMWI2MTIwNjRmYjgxYjZiNWJmZWZmNzAxNDcwODdjYzg2MTAwM2I5YWRjYWQ0MzA5MTk5MTFlZDI5NGQ4MiIsInZlcnNpb24iOjF9.GrHhqY6L8AJEy0XaNzR2QI2nnwJUen8Ay5sKVh0gBN3jAv-DWwNrjVZgeclGgH4pOdRxxlNCOkZyPnEEon4eAA"}, {"type": "recall", "value": 0.8802419956104793, "name": "Recall Macro", "verified": true, "verifyToken": "eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiYjFhNjA2M2IxZGQwYjE3YzIzZGRkMTM1MDg5OTBiNTY3YjE1YjE0ZDlkNmI1ZmY5ZmM5OTZkOTk2ODI3Mzc3YiIsInZlcnNpb24iOjF9.yWoQSRCGGu6mNhjak6fPM-w01kAlDK8lDVdlKserf19gEeiB4vyPfklrp4HdlRFadfUB7pJ2iloTCkDj_jPYBA"}, {"type": "recall", "value": 0.8819154355578197, "name": "Recall Micro", "verified": true, "verifyToken": 
"eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiYjQ1N2FhNmRiMWY5YmIwODgzNjI2YjY2NzgwNmQ2ZDRmN2UzNTg3MWQ0NDhmMjMzNjc2NGExMjliNWYxMDRjZSIsInZlcnNpb24iOjF9.XGiAwKlPkFwimVDK2CJ37oi8mz2KkJMxAanTJNFcW_Lwa-9T9--yZNtS3t1pfbUP2NeXxCzW_8DlxnM7RcG2DA"}, {"type": "recall", "value": 0.8819154355578197, "name": "Recall Weighted", "verified": true, "verifyToken": "eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiNDU1OWFjN2ZmYjVlNWJjZTVmZDQ0MmVjZmFkMmU2OTkzZTcxZDkyZTlmN2E0NjFkOTE4YzU1ZjVjYWMxYjViYSIsInZlcnNpb24iOjF9.HpRWd_-NXIgZemTCIcpK2lpe4bt2fro_NgWX2wuvN4uWVdKsYKr9v5W8EOEv4xWzdbgtlllCG9UCc3-7YqBAAg"}, {"type": "f1", "value": 0.8802937937959167, "name": "F1 Macro", "verified": true, "verifyToken": "eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiY2U1OGNmZDMxZTUwNDgxZjIzYWM2ZGQzZTg1NmNjMjdjNTkxNTk0MGI2ZDlkYjVmODFiZTllZmE0NzZlZWVlOCIsInZlcnNpb24iOjF9.7NupnTf-kIv0pIoof-2XHp7ESavQeTDDRGs3bTF3F0UJsorY8WO7I_qyoGiuPmLWtwFsNJjybQdMahM_oss7Ag"}, {"type": "f1", "value": 0.8819154355578197, "name": "F1 Micro", "verified": true, "verifyToken": "eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiODA2MGU2MzM5OWRjMTk4OGYxNTIxMjUyNWI0YjU5ZWRlMDZhMWRjMjk1MmQzZDg0YTYzYzY4M2U3OWFhNzEwNiIsInZlcnNpb24iOjF9.dIYUojg4cbbQCP6rlp2tbX72tMR5ROtUZYFDJBgHD8_KfEAr9nNoLeP2cvFCYcFe8MyQh7LADTK5l0PTt3B0AQ"}, {"type": "f1", "value": 0.8811955957302677, "name": "F1 Weighted", "verified": true, "verifyToken": "eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiY2I2ZDQ4NWY5NmNmZjNjOWRjNGUyYzcyZWNjNzA0MGJlZmRkYWIwNjVmYmFlNjRmMjAwMWIwMTJjNDY1MjYxNyIsInZlcnNpb24iOjF9.LII2Vu8rWWbjWU55Yenf4ZsSpReiPsoBmHH1XwgVu7HgTtL-TnRaCCxSTJ0i0jnK8sa2kKqXw1RndE1HL1GbBQ"}, {"type": "loss", "value": 0.3171548545360565, "name": "loss", "verified": true, "verifyToken": "eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiOGYxNDA4YzBjMGU5MDBjNGQwOThlMzZkNWFjNDg4MzdiNWFiNGM2ZmQyOTZmNTBkMTE1OGI1NzhmMGM3ZWJjYSIsInZlcnNpb24iOjF9._yP8hC7siIQkSG8-R9RLlIYqqyh8sobk-jN1-QELU0iv9VS54df_7nNPy8hGUVx-TAntaIeFyQ8DLVcM_vVDDw"}]}]}]} | Jiva/xlm-roberta-large-it-mnli | null | [
"transformers",
"pytorch",
"safetensors",
"xlm-roberta",
"text-classification",
"tensorflow",
"zero-shot-classification",
"it",
"dataset:multi_nli",
"dataset:glue",
"arxiv:1911.02116",
"license:mit",
"model-index",
"autotrain_compatible",
"endpoints_compatible",
"has_space",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
text-generation | transformers |
# My Awesome Model | {"tags": ["conversational"]} | Jllama/dialoGPT-small-Joshua-test | null | [
"transformers",
"pytorch",
"gpt2",
"text-generation",
"conversational",
"autotrain_compatible",
"endpoints_compatible",
"text-generation-inference",
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |
null | null | {} | Joanna88/Quenya | null | [
"region:us"
] | null | 2022-03-02T23:29:04+00:00 |