modelId (string, 4–81 chars) | tags (list) | pipeline_tag (string, 17 classes) | config (dict) | downloads (int64, 0–59.7M) | first_commit (timestamp[ns, tz=UTC]) | card (string, 51–438k chars)
---|---|---|---|---|---|---
dccuchile/bert-base-spanish-wwm-cased-finetuned-pos | [
"pytorch",
"bert",
"token-classification",
"transformers",
"autotrain_compatible"
]
| token-classification | {
"architectures": [
"BertForTokenClassification"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 1 | null | Please refer to [flaim](https://github.com/bobmcdear/flaim) for sample usage and more information.
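The row's config marks this as a BERT token-classification checkpoint; a minimal transformers sketch, assuming the model loads with the standard pipeline API (the example sentence is illustrative):
```python
from transformers import pipeline

# Repo id taken from the modelId column of this row
tagger = pipeline("token-classification", model="dccuchile/bert-base-spanish-wwm-cased-finetuned-pos")
print(tagger("El presidente viajó a Santiago."))
```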
|
dccuchile/bert-base-spanish-wwm-cased-finetuned-qa-mlqa | [
"pytorch",
"bert",
"question-answering",
"transformers",
"autotrain_compatible"
]
| question-answering | {
"architectures": [
"BertForQuestionAnswering"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 5 | null | Please refer to [flaim](https://github.com/bobmcdear/flaim) for sample usage and more information.
|
dccuchile/bert-base-spanish-wwm-cased-finetuned-xnli | [
"pytorch",
"bert",
"text-classification",
"transformers"
]
| text-classification | {
"architectures": [
"BertForSequenceClassification"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 28 | null | Please refer to [flaim](https://github.com/bobmcdear/flaim) for sample usage and more information.
|
dccuchile/bert-base-spanish-wwm-uncased-finetuned-mldoc | [
"pytorch",
"bert",
"text-classification",
"transformers"
]
| text-classification | {
"architectures": [
"BertForSequenceClassification"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 39 | 2023-01-24T14:42:05Z | Please refer to [flaim](https://github.com/bobmcdear/flaim) for sample usage and more information.
|
dccuchile/bert-base-spanish-wwm-uncased-finetuned-ner | [
"pytorch",
"bert",
"token-classification",
"transformers",
"autotrain_compatible"
]
| token-classification | {
"architectures": [
"BertForTokenClassification"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 5 | null | Please refer to [flaim](https://github.com/bobmcdear/flaim) for sample usage and more information.
|
dccuchile/bert-base-spanish-wwm-uncased-finetuned-pos | [
"pytorch",
"bert",
"token-classification",
"transformers",
"autotrain_compatible"
]
| token-classification | {
"architectures": [
"BertForTokenClassification"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 5 | null | ---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: LunarLander-v2
type: LunarLander-v2
metrics:
- type: mean_reward
value: 278.51 +/- 17.32
name: mean_reward
verified: false
---
# **PPO** Agent playing **LunarLander-v2**
This is a trained model of a **PPO** agent playing **LunarLander-v2**
using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).
## Usage (with Stable-baselines3)
A minimal loading sketch (the repo id and filename are placeholders to replace with this model's actual values):
```python
from stable_baselines3 import PPO
from huggingface_sb3 import load_from_hub

# Download the checkpoint from the Hub and load it as a PPO agent
checkpoint = load_from_hub(repo_id="<user>/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)
```
|
dccuchile/bert-base-spanish-wwm-uncased-finetuned-qa-mlqa | [
"pytorch",
"bert",
"question-answering",
"transformers",
"autotrain_compatible"
]
| question-answering | {
"architectures": [
"BertForQuestionAnswering"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 5 | null | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- conll2003
metrics:
- precision
- recall
- f1
- accuracy
model-index:
- name: bert-finetuned-ner
results:
- task:
name: Token Classification
type: token-classification
dataset:
name: conll2003
type: conll2003
config: conll2003
split: train
args: conll2003
metrics:
- name: Precision
type: precision
value: 0.9371580169126181
- name: Recall
type: recall
value: 0.9511948838774823
- name: F1
type: f1
value: 0.9441242796291656
- name: Accuracy
type: accuracy
value: 0.9864455171601814
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# bert-finetuned-ner
This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the conll2003 dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0622
- Precision: 0.9372
- Recall: 0.9512
- F1: 0.9441
- Accuracy: 0.9864
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
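A minimal sketch of how the hyperparameters above map onto the standard `transformers` `TrainingArguments` (model/dataset wiring omitted; the output directory name is illustrative):
```python
from transformers import TrainingArguments

# Mirrors the list above; the Adam betas/epsilon shown there are the Trainer defaults
args = TrainingArguments(
    output_dir="bert-finetuned-ner",
    learning_rate=2e-5,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=3,
)
```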
### Training results
| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| 0.0877 | 1.0 | 1756 | 0.0710 | 0.9192 | 0.9318 | 0.9255 | 0.9816 |
| 0.0352 | 2.0 | 3512 | 0.0641 | 0.9286 | 0.9478 | 0.9381 | 0.9857 |
| 0.0172 | 3.0 | 5268 | 0.0622 | 0.9372 | 0.9512 | 0.9441 | 0.9864 |
### Framework versions
- Transformers 4.25.1
- Pytorch 1.13.1+cu116
- Datasets 2.8.0
- Tokenizers 0.13.2
|
dccuchile/bert-base-spanish-wwm-uncased-finetuned-xnli | [
"pytorch",
"bert",
"text-classification",
"transformers"
]
| text-classification | {
"architectures": [
"BertForSequenceClassification"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 36 | null | ---
tags:
- generated_from_trainer
model-index:
- name: DistilBERT-POWO_Scratch
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# DistilBERT-POWO_Scratch
This model was trained from scratch (no pretrained checkpoint) on an unspecified dataset.
It achieves the following results on the evaluation set:
- Loss: 4.9068
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 5
- eval_batch_size: 16
- seed: 42
- gradient_accumulation_steps: 8
- total_train_batch_size: 40
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
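(With gradient accumulation, the effective batch size is train_batch_size × gradient_accumulation_steps = 5 × 8 = 40, which is the total_train_batch_size reported above.)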
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 7.104 | 0.18 | 200 | 5.9641 |
| 5.6973 | 0.36 | 400 | 5.5992 |
| 5.5464 | 0.54 | 600 | 5.4564 |
| 5.377 | 0.72 | 800 | 5.3606 |
| 5.2162 | 0.9 | 1000 | 5.2674 |
| 5.1499 | 1.08 | 1200 | 5.2080 |
| 5.1313 | 1.26 | 1400 | 5.1447 |
| 5.0138 | 1.44 | 1600 | 5.1041 |
| 4.9509 | 1.62 | 1800 | 5.0572 |
| 4.9598 | 1.8 | 2000 | 5.0185 |
| 4.9581 | 1.98 | 2200 | 5.0109 |
| 4.8458 | 2.16 | 2400 | 4.9608 |
| 4.953 | 2.34 | 2600 | 4.9482 |
| 4.7448 | 2.52 | 2800 | 4.9211 |
| 4.8574 | 2.71 | 3000 | 4.9093 |
| 4.8402 | 2.89 | 3200 | 4.8980 |
### Framework versions
- Transformers 4.25.1
- Pytorch 1.13.1+cu116
- Datasets 2.8.0
- Tokenizers 0.13.2
|
dccuchile/distilbert-base-spanish-uncased-finetuned-ner | [
"pytorch",
"distilbert",
"token-classification",
"transformers",
"autotrain_compatible"
]
| token-classification | {
"architectures": [
"DistilBertForTokenClassification"
],
"model_type": "distilbert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 28 | null |
---
tags:
- unity-ml-agents
- ml-agents
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-Huggy
library_name: ml-agents
---
# **ppo** Agent playing **Huggy**
This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).
## Usage (with ML-Agents)
The Documentation: https://github.com/huggingface/ml-agents#get-started
We wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub.
### Resume the training
```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```
### Watch your Agent play
You can watch your agent **playing directly in your browser**:
1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy
2. Write your model_id: Uswa04/ppo-Huggy
3. Select your *.nn /*.onnx file
4. Click on Watch the agent play 👀
|
dccuchile/distilbert-base-spanish-uncased-finetuned-pos | [
"pytorch",
"distilbert",
"token-classification",
"transformers",
"autotrain_compatible"
]
| token-classification | {
"architectures": [
"DistilBertForTokenClassification"
],
"model_type": "distilbert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 3 | null | ---
license: mit
tags:
- generated_from_trainer
metrics:
- rouge
model-index:
- name: bart-large-cnn-samsum-ElectrifAi_v10
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# bart-large-cnn-samsum-ElectrifAi_v10
This model is a fine-tuned version of [philschmid/bart-large-cnn-samsum](https://huggingface.co/philschmid/bart-large-cnn-samsum) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 1.1748
- Rouge1: 58.3392
- Rouge2: 35.1686
- Rougel: 45.4136
- Rougelsum: 56.9138
- Gen Len: 108.375
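A minimal inference sketch with the transformers pipeline API (the `<org>` prefix is a placeholder, since the card gives only the model name, and the input dialogue is illustrative):
```python
from transformers import pipeline

# "<org>" is hypothetical; substitute the namespace this checkpoint actually lives under
summarizer = pipeline("summarization", model="<org>/bart-large-cnn-samsum-ElectrifAi_v10")
dialogue = "Jeff: Can you review my PR today?\nAnna: Sure, send me the link."
print(summarizer(dialogue)[0]["summary_text"])
```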
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 4
- eval_batch_size: 4
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:--------:|
| No log | 1.0 | 21 | 1.1573 | 56.0772 | 34.1572 | 44.3652 | 54.8621 | 106.0833 |
| No log | 2.0 | 42 | 1.1764 | 57.7245 | 34.6517 | 45.67 | 56.3426 | 106.4167 |
| No log | 3.0 | 63 | 1.1748 | 58.3392 | 35.1686 | 45.4136 | 56.9138 | 108.375 |
### Framework versions
- Transformers 4.25.1
- Pytorch 1.12.1
- Datasets 2.6.1
- Tokenizers 0.13.2
|
Chae/botman | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
]
| conversational | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": 1000
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 5 | null | This is a dummy repo created to test the huggingface_hub Python library. |
Chaewon/mnmt_decoder_en | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
]
| text-generation | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": true,
"max_length": 50
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 8 | null | ---
tags:
- FrozenLake-v1-4x4-no_slippery
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-FrozenLake-v1-4x4-noSlippery
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: FrozenLake-v1-4x4-no_slippery
type: FrozenLake-v1-4x4-no_slippery
metrics:
- type: mean_reward
value: 1.00 +/- 0.00
name: mean_reward
verified: false
---
# **Q-Learning** Agent playing **FrozenLake-v1**
This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.
## Usage
```python
import gym

# load_from_hub is the helper defined in the Deep RL course notebook
model = load_from_hub(repo_id="SorinAbrudan/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")
# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
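To act greedily with the loaded table, a short follow-up sketch (assuming the pickled dict stores the learned Q-table under a `qtable` key, as in the Deep RL course):
```python
import numpy as np

state = env.reset()  # older gym API: reset() returns the observation directly
action = int(np.argmax(model["qtable"][state]))  # greedy action for this state
```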
|
Chakita/Friends | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
]
| conversational | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": 1000
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 8 | null | ---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: distilbert-base-uncased-finetuned-emotion
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert-base-uncased-finetuned-emotion
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 64
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2
### Framework versions
- Transformers 4.11.3
- Pytorch 1.12.1
- Datasets 1.16.1
- Tokenizers 0.10.3
|
Chakita/KROBERT | [
"pytorch",
"roberta",
"fill-mask",
"transformers",
"masked-lm",
"fill-in-the-blanks",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"RobertaForMaskedLM"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 7 | null |
---
tags:
- unity-ml-agents
- ml-agents
- deep-reinforcement-learning
- reinforcement-learning
- ML-Agents-Huggy
library_name: ml-agents
---
# **ppo** Agent playing **Huggy**
This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).
## Usage (with ML-Agents)
The Documentation: https://github.com/huggingface/ml-agents#get-started
We wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub.
### Resume the training
```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```
### Watch your Agent play
You can watch your agent **playing directly in your browser**:
1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy
2. Write your model_id: Kenemo/ppo-Huggy
3. Select your *.nn /*.onnx file
4. Click on Watch the agent play 👀
|
Chakita/KannadaBERT | [
"pytorch",
"roberta",
"fill-mask",
"transformers",
"masked-lm",
"fill-in-the-blanks",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"RobertaForMaskedLM"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 5 | null | ---
tags:
- Pixelcopter-PLE-v0
- reinforce
- reinforcement-learning
- custom-implementation
- deep-rl-class
model-index:
- name: Reinforce-Copter
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: Pixelcopter-PLE-v0
type: Pixelcopter-PLE-v0
metrics:
- type: mean_reward
value: 21.20 +/- 13.90
name: mean_reward
verified: false
---
# **Reinforce** Agent playing **Pixelcopter-PLE-v0**
This is a trained model of a **Reinforce** agent playing **Pixelcopter-PLE-v0**.
To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
|
Champion/test_upload_vox2_wavlm_epoch8 | [
"sidekit",
"audio"
]
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: Aeona-Beta-New
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# Aeona-Beta-New
This model is a fine-tuned version of [google/flan-t5-base](https://huggingface.co/google/flan-t5-base) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 3.5170
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 9
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 3.6794 | 1.0 | 7463 | 3.5170 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.11.0
- Datasets 2.1.0
- Tokenizers 0.12.1
|
Chan/distilroberta-base-finetuned-wikitext2 | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
license: mit
---
SFT on [Reddit TL;DR](https://huggingface.co/datasets/CarperAI/openai_summarize_tldr) |
CharlieChen/feedback-bigbird | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | 2023-01-24T16:19:51Z | ---
license: mit
tags:
- generated_from_keras_callback
model-index:
- name: Ashraf-kasem/custom_gpt2_frames_text
results: []
---
<!-- This model card has been generated automatically according to the information Keras had access to. You should
probably proofread and complete it, then remove this comment. -->
# Ashraf-kasem/custom_gpt2_frames_text
This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on an unknown dataset.
It achieves the following results on the evaluation set:
- Train Loss: 1.3938
- Validation Loss: 2.0834
- Epoch: 29
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- optimizer: {'name': 'Adam', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 5e-05, 'decay_steps': 188670, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False}
- training_precision: mixed_float16
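A sketch of the same configuration in Keras, reconstructed from the optimizer dict above (values are taken verbatim from it):
```python
import tensorflow as tf

# Mixed-precision policy matching training_precision above
tf.keras.mixed_precision.set_global_policy("mixed_float16")

lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
    initial_learning_rate=5e-05,
    decay_steps=188670,
    end_learning_rate=0.0,
    power=1.0,
)
optimizer = tf.keras.optimizers.Adam(
    learning_rate=lr_schedule, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False
)
```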
### Training results
| Train Loss | Validation Loss | Epoch |
|:----------:|:---------------:|:-----:|
| 5.4252 | 4.4731 | 0 |
| 4.1781 | 3.6928 | 1 |
| 3.5744 | 3.2572 | 2 |
| 3.1856 | 2.9789 | 3 |
| 2.9095 | 2.7887 | 4 |
| 2.6999 | 2.6534 | 5 |
| 2.5334 | 2.5484 | 6 |
| 2.3969 | 2.4706 | 7 |
| 2.2826 | 2.4102 | 8 |
| 2.1842 | 2.3518 | 9 |
| 2.0988 | 2.3096 | 10 |
| 2.0236 | 2.2740 | 11 |
| 1.9569 | 2.2443 | 12 |
| 1.8960 | 2.2214 | 13 |
| 1.8411 | 2.1954 | 14 |
| 1.7913 | 2.1815 | 15 |
| 1.7457 | 2.1652 | 16 |
| 1.7034 | 2.1552 | 17 |
| 1.6648 | 2.1398 | 18 |
| 1.6288 | 2.1289 | 19 |
| 1.5955 | 2.1213 | 20 |
| 1.5643 | 2.1114 | 21 |
| 1.5359 | 2.1071 | 22 |
| 1.5094 | 2.0998 | 23 |
| 1.4846 | 2.0942 | 24 |
| 1.4622 | 2.0911 | 25 |
| 1.4420 | 2.0893 | 26 |
| 1.4233 | 2.0879 | 27 |
| 1.4074 | 2.0838 | 28 |
| 1.3938 | 2.0834 | 29 |
### Framework versions
- Transformers 4.25.1
- TensorFlow 2.9.0
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Cheapestmedsshop/Buymodafinilus | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
license: mit
---
SFT on [Reddit TL;DR](https://huggingface.co/datasets/CarperAI/openai_summarize_tldr) |
Cheatham/xlm-roberta-base-finetuned | [
"pytorch",
"xlm-roberta",
"text-classification",
"transformers"
]
| text-classification | {
"architectures": [
"XLMRobertaForSequenceClassification"
],
"model_type": "xlm-roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 20 | null | ---
license: mit
---
SFT on [Reddit TL;DR](https://huggingface.co/datasets/CarperAI/openai_summarize_tldr) |
CheonggyeMountain-Sherpa/kogpt-trinity-poem | [
"pytorch",
"gpt2",
"text-generation",
"transformers"
]
| text-generation | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 15 | null | ---
library_name: stable-baselines3
tags:
- BipedalWalker-v3
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: BipedalWalker-v3
type: BipedalWalker-v3
metrics:
- type: mean_reward
value: 200.13 +/- 145.83
name: mean_reward
verified: false
---
# **PPO** Agent playing **BipedalWalker-v3**
This is a trained model of a **PPO** agent playing **BipedalWalker-v3**
using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).
## Usage (with Stable-baselines3)
A minimal loading sketch (the repo id and filename are placeholders to replace with this model's actual values):
```python
from stable_baselines3 import PPO
from huggingface_sb3 import load_from_hub

# Download the checkpoint from the Hub and load it as a PPO agent
checkpoint = load_from_hub(repo_id="<user>/ppo-BipedalWalker-v3", filename="ppo-BipedalWalker-v3.zip")
model = PPO.load(checkpoint)
```
|
Chertilasus/main | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
license: mit
language:
- pt
metrics:
- bleurt
thumbnail: Word2vec for Portuguese Legal Domain
pipeline_tag: summarization
---
Work developed as part of [Project IRIS](https://www.inesc-id.pt/projects/PR07005/).
Word2Vec trained for Portuguese Legal Domain
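The card includes no loading code; a minimal gensim sketch, assuming the vectors are stored in the standard word2vec binary format (the filename is a placeholder):
```python
from gensim.models import KeyedVectors

# "word2vec_legal_pt.bin" is a hypothetical filename for illustration
wv = KeyedVectors.load_word2vec_format("word2vec_legal_pt.bin", binary=True)
print(wv.most_similar("tribunal", topn=5))
```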
## Citing & Authors
### Contributions
[@MartimZanatti](https://github.com/MartimZanatti)
|
Chester/traffic-rec | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
license: creativeml-openrail-m
tags:
- pytorch
- diffusers
- stable-diffusion
- text-to-image
- diffusion-models-class
- dreambooth-hackathon
- landscape
widget:
- text: a photo of fgreeneruins ruins in Paris in front of the Arc de triomphe, mdjrny-v4 style
---
# DreamBooth model for the fgreeneruins concept trained on the CCMat/db-forest-ruins dataset.
This is a Stable Diffusion model fine-tuned on the fgreeneruins concept with DreamBooth. It can be used by modifying the `instance_prompt`: **a photo of fgreeneruins ruins**
This model was created as part of the DreamBooth Hackathon 🔥. Visit the [organisation page](https://huggingface.co/dreambooth-hackathon) for instructions on how to take part!
## Description
This is a Stable Diffusion model fine-tuned on `ruins` images for the landscape theme.<br>
Concept: **fgreeneruins** : forest ruins, greenery ruins<br>
Pretrained Model: [prompthero/openjourney](https://huggingface.co/prompthero/openjourney)<br>
Learning rate: 2e-6<br>
## Usage
```python
from diffusers import StableDiffusionPipeline
pipeline = StableDiffusionPipeline.from_pretrained('CCMat/fgreeneruins-ruins-mdj')
image = pipeline().images[0]
image
```
## Samples
Prompt: "high quality photo of Venice in fgreeneruins ruins, HDR, UHD, 64K"

<br>
Prompt: "Fallout concept of fgreeneruins ruins in underwater city, unreal engine 5"

<br>
Prompt: "New York City in fgreeneruins ruins with the Empire State Building in the background by Alejandro Bursido"

<br>
Prompt: "Manhattan in fgreeneruins ruins by Makoto Shinkai"

<br>
Prompt: "The Taj Mahal in fgreeneruins ruins, professional photograph"

|
Ching/negation_detector | [
"pytorch",
"roberta",
"question-answering",
"transformers",
"autotrain_compatible"
]
| question-answering | {
"architectures": [
"RobertaForQuestionAnswering"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 9 | null | ---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: LunarLander-v2
type: LunarLander-v2
metrics:
- type: mean_reward
value: 256.15 +/- 18.50
name: mean_reward
verified: false
---
# **PPO** Agent playing **LunarLander-v2**
This is a trained model of a **PPO** agent playing **LunarLander-v2**
using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).
## Usage (with Stable-baselines3)
A minimal loading sketch (the repo id and filename are placeholders to replace with this model's actual values):
```python
from stable_baselines3 import PPO
from huggingface_sb3 import load_from_hub

# Download the checkpoint from the Hub and load it as a PPO agent
checkpoint = load_from_hub(repo_id="<user>/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)
```
|
Chinmay/mlindia | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
language: en
thumbnail: http://www.huggingtweets.com/btctn-eth-solana/1674579901677/predictions.png
tags:
- huggingtweets
widget:
- text: "My dream is"
---
<div class="inline-flex flex-col" style="line-height: 1.5;">
<div class="flex">
<div
style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('https://pbs.twimg.com/profile_images/1140965421908144128/_80iSgFS_400x400.png')">
</div>
<div
style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('https://pbs.twimg.com/profile_images/1258321209730760705/1hkrHoOT_400x400.jpg')">
</div>
<div
style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('https://pbs.twimg.com/profile_images/1472933274209107976/6u-LQfjG_400x400.jpg')">
</div>
</div>
<div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI CYBORG 🤖</div>
<div style="text-align: center; font-size: 16px; font-weight: 800">Bitcoin News & ETH Zürich & Solana</div>
<div style="text-align: center; font-size: 14px;">@btctn-eth-solana</div>
</div>
I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets).
Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)!
## How does it work?
The model uses the standard huggingtweets pipeline.
To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI).
## Training data
The model was trained on tweets from Bitcoin News & ETH Zürich & Solana.
| Data | Bitcoin News | ETH Zürich | Solana |
| --- | --- | --- | --- |
| Tweets downloaded | 3249 | 3246 | 3217 |
| Retweets | 28 | 1023 | 1697 |
| Short tweets | 3 | 34 | 214 |
| Tweets kept | 3218 | 2189 | 1306 |
[Explore the data](https://wandb.ai/wandb/huggingtweets/runs/fvjzvqzc/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.
## Training procedure
The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @btctn-eth-solana's tweets.
Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/u13r00ou) for full transparency and reproducibility.
At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/u13r00ou/artifacts) is logged and versioned.
## How to use
You can use this model directly with a pipeline for text generation:
```python
from transformers import pipeline
generator = pipeline('text-generation',
model='huggingtweets/btctn-eth-solana')
generator("My dream is", num_return_sequences=5)
```
## Limitations and bias
The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias).
In addition, the data present in the user's tweets further affects the text generated by the model.
## About
*Built by Boris Dayma*
For more details, visit the [project repository](https://github.com/borisdayma/huggingtweets) or follow [@borisdayma](https://twitter.com/intent/follow?screen_name=borisdayma) on Twitter.
|
Chiuchiyin/DialoGPT-small-Donald | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
]
| conversational | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": 1000
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 7 | 2023-01-24T17:05:56Z | ---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
---
# {MODEL_NAME}
This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 384 dimensional dense vector space and can be used for tasks like clustering or semantic search.
<!--- Describe your model here -->
## Usage (Sentence-Transformers)
Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:
```
pip install -U sentence-transformers
```
Then you can use the model like this:
```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]
model = SentenceTransformer('{MODEL_NAME}')
embeddings = model.encode(sentences)
print(embeddings)
```
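Since the card mentions clustering and semantic search, a short follow-up using the `util` module that ships with sentence-transformers:
```python
from sentence_transformers import util

# Cosine similarity between the two embeddings computed above
print(util.cos_sim(embeddings[0], embeddings[1]))
```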
## Evaluation Results
<!--- Describe how your model was evaluated -->
For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME})
## Training
The model was trained with the parameters:
**DataLoader**:
`torch.utils.data.dataloader.DataLoader` of length 1908 with parameters:
```
{'batch_size': 16, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```
**Loss**:
`sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss`
Parameters of the fit()-Method:
```
{
"epochs": 1,
"evaluation_steps": 0,
"evaluator": "NoneType",
"max_grad_norm": 1,
"optimizer_class": "<class 'torch.optim.adamw.AdamW'>",
"optimizer_params": {
"lr": 2e-05
},
"scheduler": "WarmupLinear",
"steps_per_epoch": 1908,
"warmup_steps": 191,
"weight_decay": 0.01
}
```
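A sketch of the corresponding `fit()` call, assuming `train_dataloader` and `train_loss` were built as described in the DataLoader and Loss sections above (their construction is omitted):
```python
# Parameter values mirror the fit() configuration listed above
model.fit(
    train_objectives=[(train_dataloader, train_loss)],
    epochs=1,
    warmup_steps=191,
    weight_decay=0.01,
)
```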
## Full Model Architecture
```
SentenceTransformer(
(0): Transformer({'max_seq_length': 256, 'do_lower_case': False}) with Transformer model: BertModel
(1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
(2): Normalize()
)
```
## Citing & Authors
<!--- Describe where people can find more information --> |
Chiuchiyin/Donald | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | 2023-01-24T17:06:08Z | ---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers
---
# {MODEL_NAME}
This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 1024 dimensional dense vector space and can be used for tasks like clustering or semantic search.
<!--- Describe your model here -->
## Usage (Sentence-Transformers)
Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:
```
pip install -U sentence-transformers
```
Then you can use the model like this:
```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]
model = SentenceTransformer('{MODEL_NAME}')
embeddings = model.encode(sentences)
print(embeddings)
```
## Usage (HuggingFace Transformers)
Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: first, pass your input through the transformer model, then apply the right pooling operation on top of the contextualized word embeddings.
```python
from transformers import AutoTokenizer, AutoModel
import torch
#Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
token_embeddings = model_output[0] #First element of model_output contains all token embeddings
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']
# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}')
model = AutoModel.from_pretrained('{MODEL_NAME}')
# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
# Compute token embeddings
with torch.no_grad():
model_output = model(**encoded_input)
# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
print("Sentence embeddings:")
print(sentence_embeddings)
```
## Evaluation Results
<!--- Describe how your model was evaluated -->
For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME})
## Training
The model was trained with the parameters:
**DataLoader**:
`torch.utils.data.dataloader.DataLoader` of length 2604 with parameters:
```
{'batch_size': 2, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```
**Loss**:
`sentence_transformers.losses.MarginMSELoss.MarginMSELoss`
Parameters of the fit()-Method:
```
{
"epochs": 1,
"evaluation_steps": 0,
"evaluator": "NoneType",
"max_grad_norm": 1,
"optimizer_class": "<class 'transformers.optimization.AdamW'>",
"optimizer_params": {
"lr": 2e-05
},
"scheduler": "WarmupLinear",
"steps_per_epoch": null,
"warmup_steps": 260,
"weight_decay": 0.01
}
```
## Full Model Architecture
```
SentenceTransformer(
(0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: BertModel
(1): Pooling({'word_embedding_dimension': 1024, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```
## Citing & Authors
<!--- Describe where people can find more information --> |
Contrastive-Tension/BERT-Large-CT | [
"pytorch",
"tf",
"jax",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"BertForMaskedLM"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 5 | null | # Joint Pruning, Quantization and Distillation for BERT-large/SQuADv1.1
## Setup
```bash
git clone https://github.com/vuiseng9/optimum-intel
cd optimum-intel
pip install -e .[openvino,nncf]
cd examples/openvino/question-answering/
pip install -r requirements.txt
pip install wandb # optional
```
## Run
```bash
NNCFCFG=/path/to/openvino_config.json
MASTER_PORT=<PORTID>
RUNID=<RUN_IDENTIFIER>
OUTDIR=/path/to/saved_model
NEPOCH=30
python -m torch.distributed.launch \
--nproc_per_node 4 \
--master_port $MASTER_PORT \
run_qa.py \
--model_name_or_path bert-large-uncased-whole-word-masking \
--dataset_name squad \
--teacher_model_or_path bert-large-uncased-whole-word-masking-finetuned-squad \
--distillation_weight 0.9 \
--do_eval \
--fp16 \
--do_train \
--learning_rate 3e-5 \
--num_train_epochs $NEPOCH \
--per_device_eval_batch_size 128 \
--per_device_train_batch_size 16 \
--max_seq_length 384 \
--doc_stride 128 \
--logging_steps 1 \
--evaluation_strategy steps \
--eval_steps 250 \
--save_steps 500 \
--overwrite_output_dir \
--run_name $RUNID \
--output_dir $OUTDIR \
--nncf_compression_config $NNCFCFG
```
### Reference Results
```
Global Step: 41000
F1: 90.842
EM: 84.276
Structured Sparsity (linear): 77.73%
``` |
CrypticT1tan/DialoGPT-medium-harrypotter | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
tags:
- FrozenLake-v1-4x4-no_slippery
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-learning-frozenlake-v1-4x4
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: FrozenLake-v1-4x4-no_slippery
type: FrozenLake-v1-4x4-no_slippery
metrics:
- type: mean_reward
value: 1.00 +/- 0.00
name: mean_reward
verified: false
---
# **Q-Learning** Agent playing **FrozenLake-v1**
This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.
## Usage
```python
import gym

# load_from_hub is the helper defined in the Deep RL course notebook
model = load_from_hub(repo_id="Kenemo/q-learning-frozenlake-v1-4x4", filename="q-learning.pkl")
# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
|
CurtisBowser/DialoGPT-medium-sora | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
]
| conversational | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": 1000
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 7 | null | ---
license: cc-by-3.0
---
# Embeddings
A collection of embeddings I've created.
### Araknope
A stable diffusion embedding trained on a collection of high resolution macro photos of spiders.
**Trigger**: `araknope`
### Beez
A stable diffusion embedding trained on a collection of high resolution macro photos of bees.
**Trigger**: `beez`
### Pmantis
A stable diffusion embedding trained on a collection of high resolution macro photos of praying mantises.
**Trigger**: `pmantis`
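A minimal diffusers sketch for using one of these embeddings (the base model choice, local weight filename, and prompt are assumptions for illustration):
```python
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# "./pmantis.pt" is a hypothetical local path; pass the trigger word as the token
pipe.load_textual_inversion("./pmantis.pt", token="pmantis")
image = pipe("macro photo of a pmantis on a leaf").images[0]
```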
|
D3xter1922/distilbert-base-uncased-finetuned-cola | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null |
<h1><b>Better Pastel Mix</b></h1>
<a href="https://huggingface.co/andite/pastel-mix">Pastel Mix</a> but better.

|
D3xter1922/electra-base-discriminator-finetuned-cola | [
"pytorch",
"tensorboard",
"electra",
"text-classification",
"dataset:glue",
"transformers",
"generated_from_trainer",
"license:apache-2.0",
"model-index"
]
| text-classification | {
"architectures": [
"ElectraForSequenceClassification"
],
"model_type": "electra",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 68 | null | ---
license: openrail
language:
- en
- fa
metrics:
- code_eval
--- |
Danih1502/t5-small-finetuned-en-to-de | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
license: openrail
---
# Detection Challenges
TODO: explain what this repo is
TODO: link to larger source |
DannyMichael/ECU911 | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | SFT on [Reddit TL;DR](https://huggingface.co/datasets/CarperAI/openai_summarize_tldr) |
Darkecho789/email-gen | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: LunarLander-v2
type: LunarLander-v2
metrics:
- type: mean_reward
value: 252.66 +/- 17.03
name: mean_reward
verified: false
---
# **PPO** Agent playing **LunarLander-v2**
This is a trained model of a **PPO** agent playing **LunarLander-v2**
using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).
## Usage (with Stable-baselines3)
A minimal loading sketch (the `repo_id` and `filename` below are placeholders, not this repo's confirmed file names):
```python
from stable_baselines3 import PPO
from huggingface_sb3 import load_from_hub

# NOTE: placeholder repo_id/filename; swap in this repo's actual checkpoint
checkpoint = load_from_hub(repo_id="<user>/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)
```
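One way to reproduce the reported mean reward (a sketch; assumes a Gym registration of `LunarLander-v2` and SB3's `evaluate_policy` helper):
```python
import gym
from stable_baselines3.common.evaluation import evaluate_policy

eval_env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=10)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```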
|
DarkestSky/distilbert-base-uncased-finetuned-ner | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
tags:
- generated_from_trainer
model-index:
- name: jeju-ko-nmt-v7
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# jeju-ko-nmt-v7
This model is a fine-tuned version of [leadawon/jeju-ko-nmt-v6](https://huggingface.co/leadawon/jeju-ko-nmt-v6) on an unknown dataset.
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 1
- mixed_precision_training: Native AMP
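For reference, a sketch of the equivalent Hugging Face `TrainingArguments` (standard field names; `output_dir` is a placeholder):
```python
from transformers import TrainingArguments

# Mirrors the hyperparameters listed above; output_dir is a placeholder
args = TrainingArguments(
    output_dir="jeju-ko-nmt-v7",
    learning_rate=5e-5,
    per_device_train_batch_size=24,
    per_device_eval_batch_size=24,
    seed=42,
    lr_scheduler_type="linear",
    warmup_ratio=0.1,
    num_train_epochs=1,
    fp16=True,  # Native AMP
)
```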
### Training results
### Framework versions
- Transformers 4.26.0
- Pytorch 1.13.1+cu116
- Tokenizers 0.13.2
|
DataikuNLP/camembert-base | [
"pytorch",
"tf",
"camembert",
"fill-mask",
"fr",
"dataset:oscar",
"arxiv:1911.03894",
"transformers",
"license:mit",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"CamembertForMaskedLM"
],
"model_type": "camembert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 8 | null | ---
license: artistic-2.0
---
# Model Card for 7thv3HoloAbyss
<!-- Provide a quick summary of what the model is/does. -->
# Model Details
## Model Description
HoloCreamSafe (A) | 7th Layer V3 A (B) @ 0.5 Weighted Sum
HoloCreamSafe7thLayerV3A (A) | AbyssOrangeMix2_hard @ 0.5 Weighted Sum
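Each step above is a plain 0.5 weighted sum of two checkpoints' weights; a rough sketch of the operation (illustrative only: file names are placeholders, and such merges are normally done in a merge UI):
```python
import torch

# Placeholder file names; illustrates a 0.5 weighted-sum merge of two checkpoints
a = torch.load("model_a.ckpt", map_location="cpu")["state_dict"]
b = torch.load("model_b.ckpt", map_location="cpu")["state_dict"]
merged = {k: 0.5 * a[k] + 0.5 * b[k] for k in a.keys() & b.keys()}
torch.save({"state_dict": merged}, "merged.ckpt")
```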
## Model Sources
- HoloCreamSafe: https://pixeldrain.com/u/SKoxx1wH
- 7th V3 A: https://huggingface.co/syaimu/7th_Layer/tree/main
- AbyssOrangeMix2: https://huggingface.co/WarriorMama777/OrangeMixs/tree/main/Models/AbyssOrangeMix2
## Recommendations
- Sampler: DPM++ 2S a Karras
- CFG: 5
- Steps: 60
- Upscaler: 4x UltraSharp
- Denoise: 0.56
- bad-artist embed: https://huggingface.co/NiXXerHATTER59/bad-artist/tree/main
## How to Get Started with the Model
https://files.catbox.moe/v58q5q.png
Positive: nsfw, inside, outside, 1girl, (eyeliner:1.1), (pastel_goth:1.1), blue_eyes, long_hair, bangs, straight bangs, fringe, shiny_skin, (huge_thighs:1.1), (wide_hips:1.1), (huge breasts:1.1), (ostrich onesie:1.2), full_body, medium_shot
Negative: bad-artist, out of frame, cropped
## Results
<img src="https://i.imgur.com/VTHwReo.png" width="480" height="">
<img src="https://i.imgur.com/UAKTQR0.png" width="480" height=""> |
DataikuNLP/paraphrase-albert-small-v2 | [
"pytorch",
"albert",
"arxiv:1908.10084",
"sentence-transformers",
"feature-extraction",
"sentence-similarity",
"transformers",
"license:apache-2.0"
]
| sentence-similarity | {
"architectures": [
"AlbertModel"
],
"model_type": "albert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 628 | null | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
- f1
model-index:
- name: distilroberta-base-mrpc-glu-cristian-agudelo
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: glue
type: glue
config: mrpc
split: validation
args: mrpc
metrics:
- name: Accuracy
type: accuracy
value: 0.821078431372549
- name: F1
type: f1
value: 0.8712522045855379
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilroberta-base-mrpc-glu-cristian-agudelo
This model is a fine-tuned version of [distilroberta-base](https://huggingface.co/distilroberta-base) on the glue dataset.
It achieves the following results on the evaluation set:
- Loss: 0.9131
- Accuracy: 0.8211
- F1: 0.8713
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|
| 0.285 | 1.09 | 500 | 0.8959 | 0.8407 | 0.8845 |
| 0.2653 | 2.18 | 1000 | 0.9131 | 0.8211 | 0.8713 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.13.1+cu116
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Davlan/bert-base-multilingual-cased-finetuned-luganda | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"BertForMaskedLM"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 16 | 2023-01-25T02:52:15Z | ---
license: cc0-1.0
---
https://huggingface.co/deadman44/SD_Anime_Merged_Models |
Davlan/bert-base-multilingual-cased-finetuned-luo | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"BertForMaskedLM"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 11 | null | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- imagefolder
metrics:
- accuracy
- f1
- recall
- precision
model-index:
- name: Brain_Tumor_Classification_using_swin_transformer
results:
- task:
name: Image Classification
type: image-classification
dataset:
name: imagefolder
type: imagefolder
config: default
split: train
args: default
metrics:
- name: Accuracy
type: accuracy
value: 0.9949179046129789
- name: F1
type: f1
value: 0.9949179046129789
- name: Recall
type: recall
value: 0.9949179046129789
- name: Precision
type: precision
value: 0.9949179046129789
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# Brain_Tumor_Classification_using_swin_transformer
This model is a fine-tuned version of [microsoft/swin-base-patch4-window7-224-in22k](https://huggingface.co/microsoft/swin-base-patch4-window7-224-in22k) on the imagefolder dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0118
- Accuracy: 0.9949
- F1: 0.9949
- Recall: 0.9949
- Precision: 0.9949
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 128
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 3
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Recall | Precision |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:------:|:---------:|
| 0.081 | 1.0 | 180 | 0.0557 | 0.9832 | 0.9832 | 0.9832 | 0.9832 |
| 0.0816 | 2.0 | 360 | 0.0187 | 0.9937 | 0.9937 | 0.9937 | 0.9937 |
| 0.0543 | 3.0 | 540 | 0.0118 | 0.9949 | 0.9949 | 0.9949 | 0.9949 |
### Framework versions
- Transformers 4.23.1
- Pytorch 1.13.0
- Datasets 2.6.1
- Tokenizers 0.13.1
|
Davlan/byt5-base-eng-yor-mt | [
"pytorch",
"t5",
"text2text-generation",
"arxiv:2103.08647",
"transformers",
"autotrain_compatible"
]
| text2text-generation | {
"architectures": [
"T5ForConditionalGeneration"
],
"model_type": "t5",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 11 | null | ---
tags:
- autotrain
- text-classification
language:
- en
widget:
- text: "I love AutoTrain 🤗"
datasets:
- braedennorris/autotrain-data-enterprise_v_consumer
co2_eq_emissions:
emissions: 1.1718652256627062
---
Enterprise = 1
Consumer = 0
# Model Trained Using AutoTrain
- Problem type: Binary Classification
- Model ID: 3052187265
- CO2 Emissions (in grams): 1.1719
## Validation Metrics
- Loss: 0.428
- Accuracy: 0.824
- Precision: 0.805
- Recall: 0.896
- AUC: 0.891
- F1: 0.848
## Usage
You can use cURL to access this model:
```bash
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/braedennorris/autotrain-enterprise_v_consumer-3052187265
```
Or use the Python API:
```python
from transformers import AutoModelForSequenceClassification, AutoTokenizer
model = AutoModelForSequenceClassification.from_pretrained("braedennorris/autotrain-enterprise_v_consumer-3052187265", use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained("braedennorris/autotrain-enterprise_v_consumer-3052187265", use_auth_token=True)
inputs = tokenizer("I love AutoTrain", return_tensors="pt")
outputs = model(**inputs)
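# Convert logits to a predicted label (sketch; Enterprise=1 / Consumer=0 per the mapping above)
import torch
pred = torch.softmax(outputs.logits, dim=-1).argmax(dim=-1).item()
print("Enterprise" if pred == 1 else "Consumer")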
``` |
Davlan/mT5_base_yoruba_adr | [
"pytorch",
"mt5",
"text2text-generation",
"arxiv:2003.10564",
"arxiv:2103.08647",
"transformers",
"autotrain_compatible"
]
| text2text-generation | {
"architectures": [
"MT5ForConditionalGeneration"
],
"model_type": "mt5",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 5 | null | ---
license: openrail
---
Also on https://civitai.com/models/5301/elysium-kuro-anime
The anime model is a custom mix plus a finetune on a dataset of high-quality images (the mix includes Anything 4.0, WD 1.4 Booru, and Seek Art Mega V1) and contains the kl-f8-anime2 VAE from Waifu Diffusion.
Example settings:
- Negative prompt: (lowres:1.1), (worst quality:1.2), (low quality:1.1), bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, normal quality, jpeg artifacts, signature, watermark, username, blurry
- General model: Clip skip 1, VAE: 'vae-ft-mse-840000' from StabilityAI (included)
- Anime model: Clip skip 2, VAE: 'kl-f8-anime2.ckpt' from Waifu Diffusion (included)
Example images from anime model:

General model coming soon.
|
Davlan/mt5-small-en-pcm | [
"pytorch",
"mt5",
"text2text-generation",
"transformers",
"autotrain_compatible"
]
| text2text-generation | {
"architectures": [
"MT5ForConditionalGeneration"
],
"model_type": "mt5",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 9 | null | ---
tags:
- CartPole-v1
- reinforce
- reinforcement-learning
- custom-implementation
- deep-rl-class
model-index:
- name: Reinforce-CartPole-v1
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: CartPole-v1
type: CartPole-v1
metrics:
- type: mean_reward
value: 500.00 +/- 0.00
name: mean_reward
verified: false
---
# **Reinforce** Agent playing **CartPole-v1**
This is a trained model of a **Reinforce** agent playing **CartPole-v1**.
To learn how to use this model and train your own, check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
|
Davlan/mt5-small-pcm-en | [
"pytorch",
"mt5",
"text2text-generation",
"transformers",
"autotrain_compatible"
]
| text2text-generation | {
"architectures": [
"MT5ForConditionalGeneration"
],
"model_type": "mt5",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 9 | null | ---
license: apache-2.0
datasets:
- allenai/objaverse
- allenai/soda
metrics:
- accuracy
- cer
- character
- code_eval
library_name: adapter-transformers
---
# Model Card for Model ID
<!-- Provide a quick summary of what the model is/does. -->
This modelcard aims to be a base template for new models. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/modelcard_template.md?plain=1).
# Model Details
## Model Description
<!-- Provide a longer summary of what this model is. -->
- **Developed by:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]
## Model Sources [optional]
<!-- Provide the basic links for the model. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
# Uses
<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
## Direct Use
<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
[More Information Needed]
## Downstream Use [optional]
<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
[More Information Needed]
## Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
[More Information Needed]
# Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
## Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
## How to Get Started with the Model
Use the code below to get started with the model.
[More Information Needed]
# Training Details
## Training Data
<!-- This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
[More Information Needed]
## Training Procedure [optional]
<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
### Preprocessing
[More Information Needed]
### Speeds, Sizes, Times
<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
[More Information Needed]
# Evaluation
<!-- This section describes the evaluation protocols and provides the results. -->
## Testing Data, Factors & Metrics
### Testing Data
<!-- This should link to a Data Card if possible. -->
[More Information Needed]
### Factors
<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
[More Information Needed]
### Metrics
<!-- These are the evaluation metrics being used, ideally with a description of why. -->
[More Information Needed]
## Results
[More Information Needed]
### Summary
# Model Examination [optional]
<!-- Relevant interpretability work for the model goes here -->
[More Information Needed]
# Environmental Impact
<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]
# Technical Specifications [optional]
## Model Architecture and Objective
[More Information Needed]
## Compute Infrastructure
[More Information Needed]
### Hardware
[More Information Needed]
### Software
[More Information Needed]
# Citation [optional]
<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
# Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
[More Information Needed]
# More Information [optional]
[More Information Needed]
# Model Card Authors [optional]
[More Information Needed]
# Model Card Contact
[More Information Needed]
|
Davlan/mt5_base_yor_eng_mt | [
"pytorch",
"mt5",
"text2text-generation",
"arxiv:2103.08647",
"transformers",
"autotrain_compatible"
]
| text2text-generation | {
"architectures": [
"MT5ForConditionalGeneration"
],
"model_type": "mt5",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 8 | null | ---
license: mit
datasets:
- emotion
language:
- en
library_name: transformers
---
- **Developed by:** Heegyu Kim
- **Model type:** GPT-2
- **Language(s) (NLP):** English
- **License:** MIT
# Uses
```python
from transformers import pipeline
generator = pipeline('text-generation', 'heegyu/gpt2-emotion')
prompt = "sadness I'm so " # start token should be one of ["sadness", "joy", "love", "anger", "fear", "surprise"]
print(generator(prompt)[0]['generated_text'])
>>> sadness I'm so tired of seeing all the stupid things that i ve learned from past years that i feel like ive been so stupid and blah and then i feel like ive just wasted all my energy doing stupid shit like how i never
``` |
Davlan/xlm-roberta-base-finetuned-amharic | [
"pytorch",
"xlm-roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"XLMRobertaForMaskedLM"
],
"model_type": "xlm-roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 401 | null | ---
library_name: stable-baselines3
tags:
- AntBulletEnv-v0
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: A2C
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: AntBulletEnv-v0
type: AntBulletEnv-v0
metrics:
- type: mean_reward
value: 2164.04 +/- 80.36
name: mean_reward
verified: false
---
# **A2C** Agent playing **AntBulletEnv-v0**
This is a trained model of an **A2C** agent playing **AntBulletEnv-v0**
using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).
## Usage (with Stable-baselines3)
A minimal loading sketch (placeholder `repo_id`/`filename`; `pybullet_envs` must be installed for AntBulletEnv):
```python
from stable_baselines3 import A2C
from huggingface_sb3 import load_from_hub

# NOTE: placeholder repo_id/filename; swap in this repo's actual checkpoint
checkpoint = load_from_hub(repo_id="<user>/a2c-AntBulletEnv-v0", filename="a2c-AntBulletEnv-v0.zip")
model = A2C.load(checkpoint)
```
|
Davlan/xlm-roberta-base-finetuned-english | [
"pytorch",
"xlm-roberta",
"fill-mask",
"transformers",
"license:apache-2.0",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"XLMRobertaForMaskedLM"
],
"model_type": "xlm-roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 5 | null | ---
license: apache-2.0
tags:
- vision
- depth-estimation
- generated_from_trainer
model-index:
- name: glpn-nyu-finetuned-diode-230125-042306
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# glpn-nyu-finetuned-diode-230125-042306
This model is a fine-tuned version of [vinvino02/glpn-nyu](https://huggingface.co/vinvino02/glpn-nyu) on the diode-subset dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4380
- Mae: 0.4255
- Rmse: 0.6150
- Abs Rel: 0.4444
- Log Mae: 0.1724
- Log Rmse: 0.2247
- Delta1: 0.3675
- Delta2: 0.6329
- Delta3: 0.8147
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0003
- train_batch_size: 24
- eval_batch_size: 48
- seed: 2022
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.15
- num_epochs: 100
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Mae | Rmse | Abs Rel | Log Mae | Log Rmse | Delta1 | Delta2 | Delta3 |
|:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:-------:|:-------:|:--------:|:------:|:------:|:------:|
| 1.0761 | 1.0 | 72 | 0.5029 | 0.4779 | 0.6689 | 0.5504 | 0.2005 | 0.2590 | 0.3023 | 0.5336 | 0.8000 |
| 0.4776 | 2.0 | 144 | 0.4638 | 0.4495 | 0.6305 | 0.4854 | 0.1854 | 0.2371 | 0.3323 | 0.5842 | 0.7749 |
| 0.4668 | 3.0 | 216 | 0.4843 | 0.4705 | 0.6368 | 0.5459 | 0.1961 | 0.2469 | 0.3115 | 0.5258 | 0.7237 |
| 0.439 | 4.0 | 288 | 0.4596 | 0.4383 | 0.6224 | 0.4903 | 0.1794 | 0.2347 | 0.3564 | 0.6054 | 0.7900 |
| 0.4629 | 5.0 | 360 | 0.4846 | 0.4622 | 0.6347 | 0.5505 | 0.1914 | 0.2466 | 0.3240 | 0.5567 | 0.7432 |
| 0.4557 | 6.0 | 432 | 0.4660 | 0.4399 | 0.6223 | 0.5107 | 0.1801 | 0.2373 | 0.3605 | 0.5922 | 0.7992 |
| 0.4131 | 7.0 | 504 | 0.4737 | 0.4466 | 0.6291 | 0.4877 | 0.1847 | 0.2387 | 0.3592 | 0.5753 | 0.7545 |
| 0.3742 | 8.0 | 576 | 0.4756 | 0.4555 | 0.6363 | 0.5127 | 0.1879 | 0.2424 | 0.3462 | 0.5642 | 0.7581 |
| 0.3943 | 9.0 | 648 | 0.4816 | 0.4606 | 0.6340 | 0.5566 | 0.1901 | 0.2459 | 0.3304 | 0.5512 | 0.7484 |
| 0.3699 | 10.0 | 720 | 0.4779 | 0.4527 | 0.6289 | 0.5402 | 0.1869 | 0.2433 | 0.3419 | 0.5659 | 0.7699 |
| 0.3695 | 11.0 | 792 | 0.4335 | 0.4185 | 0.6141 | 0.4174 | 0.1685 | 0.2210 | 0.3837 | 0.6484 | 0.8142 |
| 0.4268 | 12.0 | 864 | 0.4831 | 0.4622 | 0.6345 | 0.5491 | 0.1912 | 0.2456 | 0.3283 | 0.5515 | 0.7369 |
| 0.4295 | 13.0 | 936 | 0.4512 | 0.4421 | 0.6267 | 0.4498 | 0.1803 | 0.2292 | 0.3508 | 0.5951 | 0.7803 |
| 0.4071 | 14.0 | 1008 | 0.4632 | 0.4514 | 0.6295 | 0.4755 | 0.1842 | 0.2334 | 0.3514 | 0.5676 | 0.7346 |
| 0.4383 | 15.0 | 1080 | 0.4655 | 0.4394 | 0.6283 | 0.4894 | 0.1793 | 0.2370 | 0.3762 | 0.6022 | 0.7816 |
| 0.4009 | 16.0 | 1152 | 0.4684 | 0.4434 | 0.6294 | 0.5215 | 0.1814 | 0.2403 | 0.3601 | 0.5881 | 0.7980 |
| 0.3889 | 17.0 | 1224 | 0.4619 | 0.4379 | 0.6357 | 0.4623 | 0.1791 | 0.2389 | 0.3946 | 0.6088 | 0.7665 |
| 0.4114 | 18.0 | 1296 | 0.4838 | 0.4642 | 0.6358 | 0.5514 | 0.1924 | 0.2471 | 0.3310 | 0.5444 | 0.7336 |
| 0.3656 | 19.0 | 1368 | 0.4771 | 0.4524 | 0.6317 | 0.5284 | 0.1869 | 0.2428 | 0.3379 | 0.5765 | 0.7665 |
| 0.4117 | 20.0 | 1440 | 0.4388 | 0.4187 | 0.6257 | 0.4113 | 0.1680 | 0.2270 | 0.4162 | 0.6619 | 0.8001 |
| 0.3641 | 21.0 | 1512 | 0.4593 | 0.4374 | 0.6238 | 0.4773 | 0.1779 | 0.2332 | 0.3705 | 0.6088 | 0.7745 |
| 0.3559 | 22.0 | 1584 | 0.4534 | 0.4300 | 0.6242 | 0.4663 | 0.1747 | 0.2329 | 0.3854 | 0.6288 | 0.7987 |
| 0.3897 | 23.0 | 1656 | 0.4695 | 0.4506 | 0.6292 | 0.5215 | 0.1852 | 0.2404 | 0.3432 | 0.5746 | 0.7698 |
| 0.4281 | 24.0 | 1728 | 0.4920 | 0.4693 | 0.6380 | 0.5835 | 0.1949 | 0.2514 | 0.3239 | 0.5352 | 0.7230 |
| 0.4113 | 25.0 | 1800 | 0.4525 | 0.4335 | 0.6405 | 0.4109 | 0.1757 | 0.2330 | 0.4046 | 0.6251 | 0.7878 |
| 0.3734 | 26.0 | 1872 | 0.4357 | 0.4159 | 0.6203 | 0.4158 | 0.1667 | 0.2241 | 0.4234 | 0.6609 | 0.7919 |
| 0.3408 | 27.0 | 1944 | 0.4544 | 0.4419 | 0.6257 | 0.4712 | 0.1806 | 0.2325 | 0.3525 | 0.5993 | 0.7850 |
| 0.3816 | 28.0 | 2016 | 0.4622 | 0.4465 | 0.6252 | 0.4919 | 0.1823 | 0.2346 | 0.3465 | 0.5844 | 0.7687 |
| 0.3643 | 29.0 | 2088 | 0.4534 | 0.4370 | 0.6219 | 0.4721 | 0.1778 | 0.2311 | 0.3653 | 0.6016 | 0.7886 |
| 0.3762 | 30.0 | 2160 | 0.4418 | 0.4302 | 0.6209 | 0.4394 | 0.1745 | 0.2261 | 0.3724 | 0.6226 | 0.7944 |
| 0.3704 | 31.0 | 2232 | 0.4723 | 0.4496 | 0.6271 | 0.5262 | 0.1848 | 0.2406 | 0.3477 | 0.5726 | 0.7679 |
| 0.3657 | 32.0 | 2304 | 0.4458 | 0.4311 | 0.6188 | 0.4580 | 0.1755 | 0.2283 | 0.3641 | 0.6167 | 0.8132 |
| 0.4261 | 33.0 | 2376 | 0.4551 | 0.4360 | 0.6240 | 0.4757 | 0.1778 | 0.2333 | 0.3707 | 0.6109 | 0.7859 |
| 0.3499 | 34.0 | 2448 | 0.4297 | 0.4131 | 0.6154 | 0.4141 | 0.1654 | 0.2222 | 0.4208 | 0.6585 | 0.8011 |
| 0.3316 | 35.0 | 2520 | 0.4553 | 0.4368 | 0.6200 | 0.4786 | 0.1780 | 0.2317 | 0.3625 | 0.6038 | 0.7848 |
| 0.3468 | 36.0 | 2592 | 0.4430 | 0.4275 | 0.6159 | 0.4460 | 0.1732 | 0.2253 | 0.3776 | 0.6204 | 0.8069 |
| 0.3439 | 37.0 | 2664 | 0.4550 | 0.4353 | 0.6234 | 0.4678 | 0.1772 | 0.2319 | 0.3741 | 0.6089 | 0.7857 |
| 0.3854 | 38.0 | 2736 | 0.4619 | 0.4410 | 0.6238 | 0.4960 | 0.1806 | 0.2359 | 0.3556 | 0.5983 | 0.7832 |
| 0.3521 | 39.0 | 2808 | 0.4743 | 0.4607 | 0.6317 | 0.5248 | 0.1902 | 0.2412 | 0.3241 | 0.5544 | 0.7351 |
| 0.3836 | 40.0 | 2880 | 0.4701 | 0.4508 | 0.6264 | 0.5249 | 0.1856 | 0.2399 | 0.3364 | 0.5747 | 0.7680 |
| 0.3601 | 41.0 | 2952 | 0.4749 | 0.4551 | 0.6281 | 0.5289 | 0.1879 | 0.2412 | 0.3288 | 0.5613 | 0.7672 |
| 0.3552 | 42.0 | 3024 | 0.4403 | 0.4215 | 0.6224 | 0.4299 | 0.1697 | 0.2267 | 0.4062 | 0.6517 | 0.8015 |
| 0.3582 | 43.0 | 3096 | 0.4307 | 0.4170 | 0.6174 | 0.4187 | 0.1676 | 0.2229 | 0.4009 | 0.6648 | 0.8095 |
| 0.332 | 44.0 | 3168 | 0.4663 | 0.4462 | 0.6244 | 0.5113 | 0.1834 | 0.2376 | 0.3452 | 0.5794 | 0.7755 |
| 0.3407 | 45.0 | 3240 | 0.4491 | 0.4333 | 0.6202 | 0.4714 | 0.1770 | 0.2309 | 0.3514 | 0.6155 | 0.8089 |
| 0.3613 | 46.0 | 3312 | 0.4767 | 0.4539 | 0.6282 | 0.5360 | 0.1874 | 0.2423 | 0.3333 | 0.5698 | 0.7528 |
| 0.3729 | 47.0 | 3384 | 0.4647 | 0.4435 | 0.6244 | 0.5128 | 0.1822 | 0.2381 | 0.3471 | 0.5923 | 0.7886 |
| 0.3304 | 48.0 | 3456 | 0.4431 | 0.4285 | 0.6150 | 0.4599 | 0.1739 | 0.2266 | 0.3627 | 0.6212 | 0.8095 |
| 0.357 | 49.0 | 3528 | 0.4558 | 0.4372 | 0.6219 | 0.4788 | 0.1784 | 0.2324 | 0.3579 | 0.6054 | 0.7861 |
| 0.3548 | 50.0 | 3600 | 0.4482 | 0.4308 | 0.6197 | 0.4612 | 0.1753 | 0.2295 | 0.3663 | 0.6237 | 0.8060 |
| 0.3332 | 51.0 | 3672 | 0.4533 | 0.4317 | 0.6252 | 0.4710 | 0.1755 | 0.2330 | 0.3745 | 0.6278 | 0.7971 |
| 0.3369 | 52.0 | 3744 | 0.4350 | 0.4189 | 0.6203 | 0.4229 | 0.1683 | 0.2249 | 0.4017 | 0.6581 | 0.8048 |
| 0.3379 | 53.0 | 3816 | 0.4344 | 0.4192 | 0.6192 | 0.4275 | 0.1683 | 0.2242 | 0.3953 | 0.6563 | 0.8049 |
| 0.3237 | 54.0 | 3888 | 0.4554 | 0.4392 | 0.6223 | 0.4822 | 0.1798 | 0.2328 | 0.3529 | 0.5952 | 0.7919 |
| 0.3523 | 55.0 | 3960 | 0.4511 | 0.4350 | 0.6207 | 0.4752 | 0.1771 | 0.2311 | 0.3673 | 0.6043 | 0.7962 |
| 0.326 | 56.0 | 4032 | 0.4460 | 0.4327 | 0.6208 | 0.4581 | 0.1756 | 0.2282 | 0.3644 | 0.6160 | 0.8041 |
| 0.3214 | 57.0 | 4104 | 0.4397 | 0.4252 | 0.6160 | 0.4384 | 0.1717 | 0.2241 | 0.3749 | 0.6333 | 0.8019 |
| 0.3342 | 58.0 | 4176 | 0.4493 | 0.4316 | 0.6176 | 0.4685 | 0.1754 | 0.2291 | 0.3640 | 0.6201 | 0.7951 |
| 0.3361 | 59.0 | 4248 | 0.4568 | 0.4394 | 0.6215 | 0.4935 | 0.1798 | 0.2341 | 0.3509 | 0.5953 | 0.7997 |
| 0.3141 | 60.0 | 4320 | 0.4425 | 0.4270 | 0.6182 | 0.4459 | 0.1727 | 0.2265 | 0.3829 | 0.6222 | 0.7972 |
| 0.3395 | 61.0 | 4392 | 0.4397 | 0.4229 | 0.6138 | 0.4450 | 0.1707 | 0.2246 | 0.3807 | 0.6318 | 0.8108 |
| 0.3124 | 62.0 | 4464 | 0.4232 | 0.4104 | 0.6128 | 0.4073 | 0.1641 | 0.2192 | 0.4074 | 0.6707 | 0.8209 |
| 0.3106 | 63.0 | 4536 | 0.4426 | 0.4223 | 0.6156 | 0.4504 | 0.1708 | 0.2267 | 0.3869 | 0.6404 | 0.8063 |
| 0.3268 | 64.0 | 4608 | 0.4391 | 0.4242 | 0.6160 | 0.4409 | 0.1715 | 0.2248 | 0.3818 | 0.6346 | 0.8082 |
| 0.3153 | 65.0 | 4680 | 0.4558 | 0.4355 | 0.6204 | 0.4877 | 0.1779 | 0.2333 | 0.3607 | 0.6069 | 0.8013 |
| 0.3063 | 66.0 | 4752 | 0.4367 | 0.4206 | 0.6154 | 0.4402 | 0.1694 | 0.2246 | 0.3891 | 0.6475 | 0.8129 |
| 0.3327 | 67.0 | 4824 | 0.4668 | 0.4466 | 0.6246 | 0.5172 | 0.1834 | 0.2383 | 0.3465 | 0.5778 | 0.7821 |
| 0.3189 | 68.0 | 4896 | 0.4423 | 0.4265 | 0.6171 | 0.4531 | 0.1726 | 0.2267 | 0.3748 | 0.6261 | 0.8109 |
| 0.3241 | 69.0 | 4968 | 0.4606 | 0.4433 | 0.6227 | 0.5013 | 0.1817 | 0.2353 | 0.3480 | 0.5843 | 0.7906 |
| 0.3165 | 70.0 | 5040 | 0.4359 | 0.4222 | 0.6128 | 0.4366 | 0.1702 | 0.2229 | 0.3809 | 0.6371 | 0.8136 |
| 0.3293 | 71.0 | 5112 | 0.4289 | 0.4150 | 0.6109 | 0.4181 | 0.1666 | 0.2197 | 0.3948 | 0.6586 | 0.8183 |
| 0.3256 | 72.0 | 5184 | 0.4457 | 0.4295 | 0.6174 | 0.4632 | 0.1747 | 0.2286 | 0.3657 | 0.6209 | 0.8117 |
| 0.3129 | 73.0 | 5256 | 0.4481 | 0.4314 | 0.6178 | 0.4680 | 0.1755 | 0.2291 | 0.3597 | 0.6201 | 0.8060 |
| 0.3197 | 74.0 | 5328 | 0.4365 | 0.4228 | 0.6150 | 0.4400 | 0.1706 | 0.2240 | 0.3744 | 0.6410 | 0.8159 |
| 0.323 | 75.0 | 5400 | 0.4351 | 0.4221 | 0.6137 | 0.4352 | 0.1703 | 0.2230 | 0.3752 | 0.6392 | 0.8141 |
| 0.3087 | 76.0 | 5472 | 0.4342 | 0.4215 | 0.6155 | 0.4321 | 0.1701 | 0.2232 | 0.3765 | 0.6439 | 0.8180 |
| 0.3126 | 77.0 | 5544 | 0.4362 | 0.4247 | 0.6160 | 0.4377 | 0.1717 | 0.2241 | 0.3731 | 0.6397 | 0.8094 |
| 0.3185 | 78.0 | 5616 | 0.4377 | 0.4234 | 0.6163 | 0.4446 | 0.1713 | 0.2256 | 0.3737 | 0.6433 | 0.8163 |
| 0.3195 | 79.0 | 5688 | 0.4426 | 0.4265 | 0.6174 | 0.4576 | 0.1731 | 0.2280 | 0.3734 | 0.6336 | 0.8149 |
| 0.3173 | 80.0 | 5760 | 0.4415 | 0.4259 | 0.6168 | 0.4550 | 0.1725 | 0.2273 | 0.3714 | 0.6381 | 0.8135 |
| 0.3207 | 81.0 | 5832 | 0.4374 | 0.4258 | 0.6172 | 0.4402 | 0.1722 | 0.2249 | 0.3689 | 0.6359 | 0.8095 |
| 0.3258 | 82.0 | 5904 | 0.4405 | 0.4283 | 0.6173 | 0.4445 | 0.1737 | 0.2257 | 0.3646 | 0.6299 | 0.8078 |
| 0.2971 | 83.0 | 5976 | 0.4430 | 0.4307 | 0.6185 | 0.4529 | 0.1748 | 0.2271 | 0.3604 | 0.6259 | 0.8040 |
| 0.3132 | 84.0 | 6048 | 0.4423 | 0.4277 | 0.6157 | 0.4478 | 0.1732 | 0.2252 | 0.3703 | 0.6270 | 0.8049 |
| 0.3281 | 85.0 | 6120 | 0.4378 | 0.4240 | 0.6152 | 0.4368 | 0.1713 | 0.2238 | 0.3770 | 0.6407 | 0.8108 |
| 0.3023 | 86.0 | 6192 | 0.4371 | 0.4241 | 0.6145 | 0.4405 | 0.1715 | 0.2241 | 0.3726 | 0.6370 | 0.8153 |
| 0.3051 | 87.0 | 6264 | 0.4327 | 0.4194 | 0.6136 | 0.4288 | 0.1692 | 0.2222 | 0.3798 | 0.6511 | 0.8170 |
| 0.3076 | 88.0 | 6336 | 0.4319 | 0.4175 | 0.6122 | 0.4262 | 0.1680 | 0.2215 | 0.3889 | 0.6534 | 0.8183 |
| 0.2981 | 89.0 | 6408 | 0.4374 | 0.4244 | 0.6136 | 0.4402 | 0.1716 | 0.2236 | 0.3728 | 0.6331 | 0.8140 |
| 0.3238 | 90.0 | 6480 | 0.4349 | 0.4222 | 0.6136 | 0.4371 | 0.1706 | 0.2233 | 0.3743 | 0.6418 | 0.8195 |
| 0.32 | 91.0 | 6552 | 0.4375 | 0.4240 | 0.6143 | 0.4417 | 0.1715 | 0.2242 | 0.3717 | 0.6379 | 0.8165 |
| 0.3087 | 92.0 | 6624 | 0.4421 | 0.4288 | 0.6162 | 0.4531 | 0.1739 | 0.2263 | 0.3652 | 0.6244 | 0.8125 |
| 0.3207 | 93.0 | 6696 | 0.4352 | 0.4216 | 0.6129 | 0.4376 | 0.1702 | 0.2231 | 0.3782 | 0.6406 | 0.8184 |
| 0.3064 | 94.0 | 6768 | 0.4398 | 0.4259 | 0.6148 | 0.4478 | 0.1727 | 0.2252 | 0.3685 | 0.6300 | 0.8147 |
| 0.3076 | 95.0 | 6840 | 0.4385 | 0.4258 | 0.6147 | 0.4446 | 0.1724 | 0.2246 | 0.3669 | 0.6321 | 0.8135 |
| 0.3181 | 96.0 | 6912 | 0.4393 | 0.4262 | 0.6150 | 0.4471 | 0.1728 | 0.2251 | 0.3663 | 0.6306 | 0.8147 |
| 0.2956 | 97.0 | 6984 | 0.4392 | 0.4271 | 0.6156 | 0.4470 | 0.1731 | 0.2252 | 0.3650 | 0.6297 | 0.8141 |
| 0.3026 | 98.0 | 7056 | 0.4390 | 0.4260 | 0.6151 | 0.4462 | 0.1726 | 0.2250 | 0.3669 | 0.6317 | 0.8144 |
| 0.329 | 99.0 | 7128 | 0.4362 | 0.4242 | 0.6156 | 0.4389 | 0.1713 | 0.2238 | 0.3716 | 0.6380 | 0.8156 |
| 0.3095 | 100.0 | 7200 | 0.4380 | 0.4255 | 0.6150 | 0.4444 | 0.1724 | 0.2247 | 0.3675 | 0.6329 | 0.8147 |
### Framework versions
- Transformers 4.24.0
- Pytorch 1.12.1+cu116
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Davlan/xlm-roberta-base-finetuned-igbo | [
"pytorch",
"xlm-roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"XLMRobertaForMaskedLM"
],
"model_type": "xlm-roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 68 | null | ---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: LunarLander-v2
type: LunarLander-v2
metrics:
- type: mean_reward
value: 278.11 +/- 15.41
name: mean_reward
verified: false
---
# **PPO** Agent playing **LunarLander-v2**
This is a trained model of a **PPO** agent playing **LunarLander-v2**
using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).
## Usage (with Stable-baselines3)
A minimal loading sketch (the `repo_id` and `filename` below are placeholders, not this repo's confirmed file names):
```python
from stable_baselines3 import PPO
from huggingface_sb3 import load_from_hub

# NOTE: placeholder repo_id/filename; swap in this repo's actual checkpoint
checkpoint = load_from_hub(repo_id="<user>/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)
```
|
Davlan/xlm-roberta-base-finetuned-kinyarwanda | [
"pytorch",
"xlm-roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"XLMRobertaForMaskedLM"
],
"model_type": "xlm-roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 61 | null | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- matthews_correlation
model-index:
- name: distilbert_sa_GLUE_Experiment_cola_96
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE COLA
type: glue
config: cola
split: validation
args: cola
metrics:
- name: Matthews Correlation
type: matthews_correlation
value: 0.0
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert_sa_GLUE_Experiment_cola_96
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE COLA dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6180
- Matthews Correlation: 0.0
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 256
- eval_batch_size: 256
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Matthews Correlation |
|:-------------:|:-----:|:----:|:---------------:|:--------------------:|
| 0.647 | 1.0 | 34 | 0.6332 | 0.0 |
| 0.6203 | 2.0 | 68 | 0.6210 | 0.0 |
| 0.6092 | 3.0 | 102 | 0.6180 | 0.0 |
| 0.6077 | 4.0 | 136 | 0.6185 | 0.0 |
| 0.6083 | 5.0 | 170 | 0.6184 | 0.0 |
| 0.607 | 6.0 | 204 | 0.6185 | 0.0 |
| 0.6078 | 7.0 | 238 | 0.6186 | 0.0 |
| 0.6087 | 8.0 | 272 | 0.6184 | 0.0 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Davlan/xlm-roberta-base-finetuned-lingala | [
"pytorch",
"xlm-roberta",
"fill-mask",
"transformers",
"license:apache-2.0",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"XLMRobertaForMaskedLM"
],
"model_type": "xlm-roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 9 | null | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- matthews_correlation
model-index:
- name: distilbert_sa_GLUE_Experiment_cola_256
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE COLA
type: glue
config: cola
split: validation
args: cola
metrics:
- name: Matthews Correlation
type: matthews_correlation
value: 0.0
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert_sa_GLUE_Experiment_cola_256
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE COLA dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6165
- Matthews Correlation: 0.0
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 256
- eval_batch_size: 256
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Matthews Correlation |
|:-------------:|:-----:|:----:|:---------------:|:--------------------:|
| 0.6103 | 1.0 | 34 | 0.6217 | 0.0 |
| 0.6077 | 2.0 | 68 | 0.6179 | 0.0 |
| 0.606 | 3.0 | 102 | 0.6182 | 0.0 |
| 0.6062 | 4.0 | 136 | 0.6165 | 0.0 |
| 0.5906 | 5.0 | 170 | 0.6183 | 0.0961 |
| 0.5491 | 6.0 | 204 | 0.6250 | 0.0495 |
| 0.512 | 7.0 | 238 | 0.6579 | 0.1173 |
| 0.4877 | 8.0 | 272 | 0.6908 | 0.1043 |
| 0.464 | 9.0 | 306 | 0.6860 | 0.1197 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Davlan/xlm-roberta-base-finetuned-luganda | [
"pytorch",
"xlm-roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"XLMRobertaForMaskedLM"
],
"model_type": "xlm-roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 11 | null | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- matthews_correlation
model-index:
- name: distilbert_sa_GLUE_Experiment_cola_192
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE COLA
type: glue
config: cola
split: validation
args: cola
metrics:
- name: Matthews Correlation
type: matthews_correlation
value: 0.0
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert_sa_GLUE_Experiment_cola_192
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE COLA dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6140
- Matthews Correlation: 0.0
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 256
- eval_batch_size: 256
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Matthews Correlation |
|:-------------:|:-----:|:----:|:---------------:|:--------------------:|
| 0.6159 | 1.0 | 34 | 0.6201 | 0.0 |
| 0.6081 | 2.0 | 68 | 0.6188 | 0.0 |
| 0.6067 | 3.0 | 102 | 0.6185 | 0.0 |
| 0.6082 | 4.0 | 136 | 0.6197 | 0.0 |
| 0.6077 | 5.0 | 170 | 0.6180 | 0.0 |
| 0.6043 | 6.0 | 204 | 0.6140 | 0.0 |
| 0.5772 | 7.0 | 238 | 0.6189 | 0.0944 |
| 0.5369 | 8.0 | 272 | 0.6379 | 0.1201 |
| 0.5082 | 9.0 | 306 | 0.6448 | 0.0828 |
| 0.4948 | 10.0 | 340 | 0.6781 | 0.1243 |
| 0.4788 | 11.0 | 374 | 0.6972 | 0.1021 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Davlan/xlm-roberta-base-finetuned-luo | [
"pytorch",
"xlm-roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"XLMRobertaForMaskedLM"
],
"model_type": "xlm-roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 5 | null | ---
license: apache-2.0
tags:
- generated_from_trainer
metrics:
- rouge
model-index:
- name: t5-small-finetuned-dialogsum
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# t5-small-finetuned-dialogsum
This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 1.2771
- Rouge1: 36.5788
- Rouge2: 13.75
- Rougel: 30.9066
- Rougelsum: 32.8118
- Gen Len: 18.846
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 3
- eval_batch_size: 3
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 4
### Training results
| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len |
|:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:|
| 1.4705 | 1.0 | 4154 | 1.3514 | 34.3952 | 11.8123 | 28.9797 | 31.003 | 18.76 |
| 1.418 | 2.0 | 8308 | 1.3023 | 35.904 | 12.9905 | 30.3195 | 32.1809 | 18.83 |
| 1.3933 | 3.0 | 12462 | 1.2832 | 36.1796 | 13.6096 | 30.6577 | 32.5292 | 18.884 |
| 1.3875 | 4.0 | 16616 | 1.2771 | 36.5788 | 13.75 | 30.9066 | 32.8118 | 18.846 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.13.1+cu116
- Datasets 2.9.0
- Tokenizers 0.13.2
|
Davlan/xlm-roberta-base-finetuned-naija | [
"pytorch",
"xlm-roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"XLMRobertaForMaskedLM"
],
"model_type": "xlm-roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 1 | null | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
- f1
model-index:
- name: distilbert_sa_GLUE_Experiment_mrpc_96
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE MRPC
type: glue
config: mrpc
split: validation
args: mrpc
metrics:
- name: Accuracy
type: accuracy
value: 0.6887254901960784
- name: F1
type: f1
value: 0.7829059829059829
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert_sa_GLUE_Experiment_mrpc_96
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE MRPC dataset.
It achieves the following results on the evaluation set:
- Loss: 0.5873
- Accuracy: 0.6887
- F1: 0.7829
- Combined Score: 0.7358
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 256
- eval_batch_size: 256
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Combined Score |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:--------------:|
| 0.6677 | 1.0 | 15 | 0.6479 | 0.6838 | 0.8122 | 0.7480 |
| 0.6455 | 2.0 | 30 | 0.6395 | 0.6838 | 0.8122 | 0.7480 |
| 0.6399 | 3.0 | 45 | 0.6331 | 0.6838 | 0.8122 | 0.7480 |
| 0.6361 | 4.0 | 60 | 0.6288 | 0.6838 | 0.8122 | 0.7480 |
| 0.6352 | 5.0 | 75 | 0.6262 | 0.6838 | 0.8122 | 0.7480 |
| 0.6315 | 6.0 | 90 | 0.6252 | 0.6838 | 0.8122 | 0.7480 |
| 0.6331 | 7.0 | 105 | 0.6244 | 0.6838 | 0.8122 | 0.7480 |
| 0.6292 | 8.0 | 120 | 0.6242 | 0.6838 | 0.8122 | 0.7480 |
| 0.6314 | 9.0 | 135 | 0.6240 | 0.6838 | 0.8122 | 0.7480 |
| 0.6296 | 10.0 | 150 | 0.6242 | 0.6838 | 0.8122 | 0.7480 |
| 0.6306 | 11.0 | 165 | 0.6241 | 0.6838 | 0.8122 | 0.7480 |
| 0.63 | 12.0 | 180 | 0.6240 | 0.6838 | 0.8122 | 0.7480 |
| 0.6337 | 13.0 | 195 | 0.6240 | 0.6838 | 0.8122 | 0.7480 |
| 0.6299 | 14.0 | 210 | 0.6239 | 0.6838 | 0.8122 | 0.7480 |
| 0.6297 | 15.0 | 225 | 0.6230 | 0.6838 | 0.8122 | 0.7480 |
| 0.6248 | 16.0 | 240 | 0.6187 | 0.6838 | 0.8122 | 0.7480 |
| 0.6065 | 17.0 | 255 | 0.5999 | 0.6936 | 0.8164 | 0.7550 |
| 0.5624 | 18.0 | 270 | 0.6007 | 0.6838 | 0.7659 | 0.7249 |
| 0.5185 | 19.0 | 285 | 0.5891 | 0.6838 | 0.7772 | 0.7305 |
| 0.4664 | 20.0 | 300 | 0.5873 | 0.6887 | 0.7829 | 0.7358 |
| 0.4248 | 21.0 | 315 | 0.5893 | 0.6936 | 0.7764 | 0.7350 |
| 0.3844 | 22.0 | 330 | 0.5949 | 0.7010 | 0.7798 | 0.7404 |
| 0.3551 | 23.0 | 345 | 0.5942 | 0.7034 | 0.7866 | 0.7450 |
| 0.3314 | 24.0 | 360 | 0.6040 | 0.7034 | 0.7881 | 0.7458 |
| 0.3181 | 25.0 | 375 | 0.6162 | 0.7010 | 0.7867 | 0.7438 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Davlan/xlm-roberta-base-finetuned-shona | [
"pytorch",
"xlm-roberta",
"fill-mask",
"transformers",
"license:apache-2.0",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"XLMRobertaForMaskedLM"
],
"model_type": "xlm-roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 5 | null | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
- f1
model-index:
- name: distilbert_sa_GLUE_Experiment_mrpc_256
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE MRPC
type: glue
config: mrpc
split: validation
args: mrpc
metrics:
- name: Accuracy
type: accuracy
value: 0.6813725490196079
- name: F1
type: f1
value: 0.8104956268221574
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert_sa_GLUE_Experiment_mrpc_256
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE MRPC dataset.
It achieves the following results on the evaluation set:
- Loss: 0.5996
- Accuracy: 0.6814
- F1: 0.8105
- Combined Score: 0.7459
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 256
- eval_batch_size: 256
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Combined Score |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:--------------:|
| 0.6343 | 1.0 | 15 | 0.6246 | 0.6838 | 0.8122 | 0.7480 |
| 0.6276 | 2.0 | 30 | 0.6234 | 0.6838 | 0.8122 | 0.7480 |
| 0.6306 | 3.0 | 45 | 0.6243 | 0.6838 | 0.8122 | 0.7480 |
| 0.6279 | 4.0 | 60 | 0.6205 | 0.6838 | 0.8122 | 0.7480 |
| 0.6168 | 5.0 | 75 | 0.5996 | 0.6814 | 0.8105 | 0.7459 |
| 0.5632 | 6.0 | 90 | 0.6020 | 0.6936 | 0.7954 | 0.7445 |
| 0.5021 | 7.0 | 105 | 0.6094 | 0.6936 | 0.7841 | 0.7389 |
| 0.4263 | 8.0 | 120 | 0.6844 | 0.6299 | 0.7113 | 0.6706 |
| 0.3476 | 9.0 | 135 | 0.7218 | 0.6373 | 0.7098 | 0.6735 |
| 0.2966 | 10.0 | 150 | 0.7759 | 0.7010 | 0.7953 | 0.7481 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Davlan/xlm-roberta-base-finetuned-somali | [
"pytorch",
"xlm-roberta",
"fill-mask",
"transformers",
"license:apache-2.0",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"XLMRobertaForMaskedLM"
],
"model_type": "xlm-roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 8 | null | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
- f1
model-index:
- name: distilbert_sa_GLUE_Experiment_mrpc_192
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE MRPC
type: glue
config: mrpc
split: validation
args: mrpc
metrics:
- name: Accuracy
type: accuracy
value: 0.6887254901960784
- name: F1
type: f1
value: 0.7783595113438045
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert_sa_GLUE_Experiment_mrpc_192
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE MRPC dataset.
It achieves the following results on the evaluation set:
- Loss: 0.5927
- Accuracy: 0.6887
- F1: 0.7784
- Combined Score: 0.7335
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 256
- eval_batch_size: 256
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Combined Score |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:--------------:|
| 0.6412 | 1.0 | 15 | 0.6239 | 0.6838 | 0.8122 | 0.7480 |
| 0.6281 | 2.0 | 30 | 0.6238 | 0.6838 | 0.8122 | 0.7480 |
| 0.629 | 3.0 | 45 | 0.6239 | 0.6838 | 0.8122 | 0.7480 |
| 0.6296 | 4.0 | 60 | 0.6236 | 0.6838 | 0.8122 | 0.7480 |
| 0.6323 | 5.0 | 75 | 0.6228 | 0.6838 | 0.8122 | 0.7480 |
| 0.6272 | 6.0 | 90 | 0.6209 | 0.6838 | 0.8122 | 0.7480 |
| 0.6175 | 7.0 | 105 | 0.6000 | 0.6838 | 0.8122 | 0.7480 |
| 0.5733 | 8.0 | 120 | 0.5927 | 0.6887 | 0.7784 | 0.7335 |
| 0.5199 | 9.0 | 135 | 0.5969 | 0.6936 | 0.7818 | 0.7377 |
| 0.4423 | 10.0 | 150 | 0.6369 | 0.6765 | 0.7700 | 0.7233 |
| 0.3645 | 11.0 | 165 | 0.6708 | 0.6838 | 0.7832 | 0.7335 |
| 0.3203 | 12.0 | 180 | 0.7179 | 0.6446 | 0.7249 | 0.6847 |
| 0.2778 | 13.0 | 195 | 0.7517 | 0.6740 | 0.7726 | 0.7233 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Davlan/xlm-roberta-base-finetuned-swahili | [
"pytorch",
"xlm-roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"XLMRobertaForMaskedLM"
],
"model_type": "xlm-roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 40 | null | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- matthews_correlation
model-index:
- name: distilbert_sa_GLUE_Experiment_cola_384
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE COLA
type: glue
config: cola
split: validation
args: cola
metrics:
- name: Matthews Correlation
type: matthews_correlation
value: 0.0
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert_sa_GLUE_Experiment_cola_384
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE COLA dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6175
- Matthews Correlation: 0.0
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 256
- eval_batch_size: 256
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Matthews Correlation |
|:-------------:|:-----:|:----:|:---------------:|:--------------------:|
| 0.6129 | 1.0 | 34 | 0.6212 | 0.0 |
| 0.6085 | 2.0 | 68 | 0.6175 | 0.0 |
| 0.6055 | 3.0 | 102 | 0.6199 | 0.0 |
| 0.588 | 4.0 | 136 | 0.6251 | 0.0706 |
| 0.5402 | 5.0 | 170 | 0.6351 | 0.1240 |
| 0.4995 | 6.0 | 204 | 0.6698 | 0.0812 |
| 0.4745 | 7.0 | 238 | 0.7124 | 0.0916 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Davlan/xlm-roberta-base-finetuned-wolof | [
"pytorch",
"xlm-roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"XLMRobertaForMaskedLM"
],
"model_type": "xlm-roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 3 | null | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
model-index:
- name: distilbert_sa_GLUE_Experiment_qnli_256
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE QNLI
type: glue
config: qnli
split: validation
args: qnli
metrics:
- name: Accuracy
type: accuracy
value: 0.6029654036243822
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert_sa_GLUE_Experiment_qnli_256
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE QNLI dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6564
- Accuracy: 0.6030
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 256
- eval_batch_size: 256
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.679 | 1.0 | 410 | 0.6614 | 0.5938 |
| 0.6496 | 2.0 | 820 | 0.6564 | 0.6030 |
| 0.6268 | 3.0 | 1230 | 0.6635 | 0.5978 |
| 0.6055 | 4.0 | 1640 | 0.6714 | 0.5933 |
| 0.5836 | 5.0 | 2050 | 0.6964 | 0.5913 |
| 0.5602 | 6.0 | 2460 | 0.7319 | 0.5832 |
| 0.5385 | 7.0 | 2870 | 0.7653 | 0.5718 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Davlan/xlm-roberta-base-finetuned-xhosa | [
"pytorch",
"xlm-roberta",
"fill-mask",
"transformers",
"license:apache-2.0",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"XLMRobertaForMaskedLM"
],
"model_type": "xlm-roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 12 | null | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
model-index:
- name: distilbert_sa_GLUE_Experiment_qnli_96
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE QNLI
type: glue
config: qnli
split: validation
args: qnli
metrics:
- name: Accuracy
type: accuracy
value: 0.604978949295259
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert_sa_GLUE_Experiment_qnli_96
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE QNLI dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6582
- Accuracy: 0.6050
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 256
- eval_batch_size: 256
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.6858 | 1.0 | 410 | 0.6653 | 0.6013 |
| 0.658 | 2.0 | 820 | 0.6582 | 0.6050 |
| 0.6395 | 3.0 | 1230 | 0.6607 | 0.6022 |
| 0.6229 | 4.0 | 1640 | 0.6699 | 0.6000 |
| 0.6087 | 5.0 | 2050 | 0.6770 | 0.5929 |
| 0.5946 | 6.0 | 2460 | 0.6980 | 0.5951 |
| 0.581 | 7.0 | 2870 | 0.7427 | 0.5854 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Davlan/xlm-roberta-base-finetuned-yoruba | [
"pytorch",
"xlm-roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"XLMRobertaForMaskedLM"
],
"model_type": "xlm-roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 29 | null | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
model-index:
- name: distilbert_sa_GLUE_Experiment_qnli_192
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE QNLI
type: glue
config: qnli
split: validation
args: qnli
metrics:
- name: Accuracy
type: accuracy
value: 0.604978949295259
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert_sa_GLUE_Experiment_qnli_192
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE QNLI dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6554
- Accuracy: 0.6050
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 256
- eval_batch_size: 256
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.6802 | 1.0 | 410 | 0.6614 | 0.5988 |
| 0.6514 | 2.0 | 820 | 0.6554 | 0.6050 |
| 0.6306 | 3.0 | 1230 | 0.6610 | 0.5938 |
| 0.6105 | 4.0 | 1640 | 0.6700 | 0.5942 |
| 0.5925 | 5.0 | 2050 | 0.6833 | 0.5891 |
| 0.5725 | 6.0 | 2460 | 0.7225 | 0.5898 |
| 0.5537 | 7.0 | 2870 | 0.7806 | 0.5810 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Davlan/xlm-roberta-base-finetuned-zulu | [
"pytorch",
"xlm-roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"XLMRobertaForMaskedLM"
],
"model_type": "xlm-roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 3 | null | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
- f1
model-index:
- name: distilbert_sa_GLUE_Experiment_mrpc_384
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE MRPC
type: glue
config: mrpc
split: validation
args: mrpc
metrics:
- name: Accuracy
type: accuracy
value: 0.6838235294117647
- name: F1
type: f1
value: 0.8122270742358079
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert_sa_GLUE_Experiment_mrpc_384
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE MRPC dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6089
- Accuracy: 0.6838
- F1: 0.8122
- Combined Score: 0.7480
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 256
- eval_batch_size: 256
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Combined Score |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:--------------:|
| 0.6363 | 1.0 | 15 | 0.6257 | 0.6838 | 0.8122 | 0.7480 |
| 0.6306 | 2.0 | 30 | 0.6230 | 0.6838 | 0.8122 | 0.7480 |
| 0.6302 | 3.0 | 45 | 0.6227 | 0.6838 | 0.8122 | 0.7480 |
| 0.6217 | 4.0 | 60 | 0.6089 | 0.6838 | 0.8122 | 0.7480 |
| 0.5729 | 5.0 | 75 | 0.6097 | 0.6838 | 0.7817 | 0.7328 |
| 0.4868 | 6.0 | 90 | 0.6395 | 0.6789 | 0.7791 | 0.7290 |
| 0.3906 | 7.0 | 105 | 0.7014 | 0.6838 | 0.7725 | 0.7282 |
| 0.3014 | 8.0 | 120 | 0.7773 | 0.6814 | 0.7735 | 0.7274 |
| 0.2538 | 9.0 | 135 | 0.8550 | 0.6789 | 0.7730 | 0.7259 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Davlan/xlm-roberta-base-ner-hrl | [
"pytorch",
"xlm-roberta",
"token-classification",
"transformers",
"autotrain_compatible"
]
| token-classification | {
"architectures": [
"XLMRobertaForTokenClassification"
],
"model_type": "xlm-roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 760 | null | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
model-index:
- name: distilbert_sa_GLUE_Experiment_qnli_384
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE QNLI
type: glue
config: qnli
split: validation
args: qnli
metrics:
- name: Accuracy
type: accuracy
value: 0.6055280981145891
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert_sa_GLUE_Experiment_qnli_384
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE QNLI dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6542
- Accuracy: 0.6055
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 256
- eval_batch_size: 256
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.6751 | 1.0 | 410 | 0.6575 | 0.6022 |
| 0.6476 | 2.0 | 820 | 0.6542 | 0.6055 |
| 0.6228 | 3.0 | 1230 | 0.6622 | 0.5982 |
| 0.5989 | 4.0 | 1640 | 0.6712 | 0.5894 |
| 0.5711 | 5.0 | 2050 | 0.7102 | 0.5845 |
| 0.5413 | 6.0 | 2460 | 0.7776 | 0.5772 |
| 0.5116 | 7.0 | 2870 | 0.8393 | 0.5678 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Davlan/xlm-roberta-base-wikiann-ner | [
"pytorch",
"tf",
"xlm-roberta",
"token-classification",
"transformers",
"autotrain_compatible"
]
| token-classification | {
"architectures": [
"XLMRobertaForTokenClassification"
],
"model_type": "xlm-roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 235 | null | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- matthews_correlation
model-index:
- name: mobilebert_sa_GLUE_Experiment_cola_256
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE COLA
type: glue
config: cola
split: validation
args: cola
metrics:
- name: Matthews Correlation
type: matthews_correlation
value: 0.0
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# mobilebert_sa_GLUE_Experiment_cola_256
This model is a fine-tuned version of [google/mobilebert-uncased](https://huggingface.co/google/mobilebert-uncased) on the GLUE COLA dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6144
- Matthews Correlation: 0.0
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 128
- eval_batch_size: 128
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
### Training results
| Training Loss | Epoch | Step | Validation Loss | Matthews Correlation |
|:-------------:|:-----:|:----:|:---------------:|:--------------------:|
| 0.6135 | 1.0 | 67 | 0.6178 | 0.0 |
| 0.6079 | 2.0 | 134 | 0.6178 | 0.0 |
| 0.6073 | 3.0 | 201 | 0.6181 | 0.0 |
| 0.6066 | 4.0 | 268 | 0.6167 | 0.0 |
| 0.6049 | 5.0 | 335 | 0.6144 | 0.0 |
| 0.5699 | 6.0 | 402 | 0.6194 | 0.1196 |
| 0.5015 | 7.0 | 469 | 0.6724 | 0.1179 |
| 0.4668 | 8.0 | 536 | 0.7723 | 0.1198 |
| 0.4425 | 9.0 | 603 | 0.7053 | 0.0810 |
| 0.4272 | 10.0 | 670 | 0.8389 | 0.1207 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Davlan/xlm-roberta-large-masakhaner | [
"pytorch",
"tf",
"xlm-roberta",
"token-classification",
"arxiv:2103.11811",
"transformers",
"autotrain_compatible"
]
| token-classification | {
"architectures": [
"XLMRobertaForTokenClassification"
],
"model_type": "xlm-roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 1,449 | null | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
- f1
model-index:
- name: distilbert_sa_GLUE_Experiment_qqp_96
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE QQP
type: glue
config: qqp
split: validation
args: qqp
metrics:
- name: Accuracy
type: accuracy
value: 0.7756863715063071
- name: F1
type: f1
value: 0.6671804469888803
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert_sa_GLUE_Experiment_qqp_96
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE QQP dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4818
- Accuracy: 0.7757
- F1: 0.6672
- Combined Score: 0.7214
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 256
- eval_batch_size: 256
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Combined Score |
|:-------------:|:-----:|:-----:|:---------------:|:--------:|:------:|:--------------:|
| 0.5528 | 1.0 | 1422 | 0.5115 | 0.7528 | 0.6284 | 0.6906 |
| 0.4965 | 2.0 | 2844 | 0.4960 | 0.7614 | 0.6420 | 0.7017 |
| 0.4769 | 3.0 | 4266 | 0.4904 | 0.7650 | 0.6382 | 0.7016 |
| 0.4619 | 4.0 | 5688 | 0.4901 | 0.7680 | 0.6526 | 0.7103 |
| 0.4489 | 5.0 | 7110 | 0.4844 | 0.7709 | 0.6663 | 0.7186 |
| 0.4366 | 6.0 | 8532 | 0.4860 | 0.7721 | 0.6712 | 0.7217 |
| 0.425 | 7.0 | 9954 | 0.4860 | 0.7747 | 0.6636 | 0.7192 |
| 0.414 | 8.0 | 11376 | 0.4818 | 0.7757 | 0.6672 | 0.7214 |
| 0.4027 | 9.0 | 12798 | 0.4871 | 0.7786 | 0.6722 | 0.7254 |
| 0.3926 | 10.0 | 14220 | 0.4919 | 0.7749 | 0.6932 | 0.7340 |
| 0.3824 | 11.0 | 15642 | 0.4890 | 0.7801 | 0.6823 | 0.7312 |
| 0.3718 | 12.0 | 17064 | 0.4981 | 0.7801 | 0.6970 | 0.7385 |
| 0.3629 | 13.0 | 18486 | 0.4989 | 0.7805 | 0.6968 | 0.7386 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Davlan/xlm-roberta-large-ner-hrl | [
"pytorch",
"tf",
"xlm-roberta",
"token-classification",
"transformers",
"autotrain_compatible"
]
| token-classification | {
"architectures": [
"XLMRobertaForTokenClassification"
],
"model_type": "xlm-roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 1,322 | null | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
- f1
model-index:
- name: distilbert_sa_GLUE_Experiment_qqp_256
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE QQP
type: glue
config: qqp
split: validation
args: qqp
metrics:
- name: Accuracy
type: accuracy
value: 0.8029680930002473
- name: F1
type: f1
value: 0.7323432565015792
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert_sa_GLUE_Experiment_qqp_256
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE QQP dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4425
- Accuracy: 0.8030
- F1: 0.7323
- Combined Score: 0.7677
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 256
- eval_batch_size: 256
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Combined Score |
|:-------------:|:-----:|:-----:|:---------------:|:--------:|:------:|:--------------:|
| 0.53 | 1.0 | 1422 | 0.5023 | 0.7557 | 0.6592 | 0.7075 |
| 0.479 | 2.0 | 2844 | 0.4823 | 0.7679 | 0.6483 | 0.7081 |
| 0.4522 | 3.0 | 4266 | 0.4788 | 0.7741 | 0.6474 | 0.7108 |
| 0.4263 | 4.0 | 5688 | 0.4753 | 0.7829 | 0.6911 | 0.7370 |
| 0.4009 | 5.0 | 7110 | 0.4536 | 0.7906 | 0.7194 | 0.7550 |
| 0.3772 | 6.0 | 8532 | 0.4497 | 0.7949 | 0.7200 | 0.7574 |
| 0.3548 | 7.0 | 9954 | 0.4453 | 0.8010 | 0.7201 | 0.7606 |
| 0.3332 | 8.0 | 11376 | 0.4425 | 0.8030 | 0.7323 | 0.7677 |
| 0.3132 | 9.0 | 12798 | 0.4654 | 0.7938 | 0.7375 | 0.7657 |
| 0.2951 | 10.0 | 14220 | 0.4551 | 0.8056 | 0.7423 | 0.7739 |
| 0.2777 | 11.0 | 15642 | 0.4675 | 0.8120 | 0.7374 | 0.7747 |
| 0.2625 | 12.0 | 17064 | 0.4946 | 0.8082 | 0.7451 | 0.7766 |
| 0.2473 | 13.0 | 18486 | 0.5041 | 0.8102 | 0.7469 | 0.7786 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Dawit/DialogGPT-small-ironman | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
]
| conversational | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": 1000
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 7 | null | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
- f1
model-index:
- name: distilbert_sa_GLUE_Experiment_qqp_192
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE QQP
type: glue
config: qqp
split: validation
args: qqp
metrics:
- name: Accuracy
type: accuracy
value: 0.790972050457581
- name: F1
type: f1
value: 0.7234348921687338
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert_sa_GLUE_Experiment_qqp_192
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE QQP dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4568
- Accuracy: 0.7910
- F1: 0.7234
- Combined Score: 0.7572
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 256
- eval_batch_size: 256
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Combined Score |
|:-------------:|:-----:|:-----:|:---------------:|:--------:|:------:|:--------------:|
| 0.5339 | 1.0 | 1422 | 0.5031 | 0.7551 | 0.6484 | 0.7018 |
| 0.4835 | 2.0 | 2844 | 0.4866 | 0.7650 | 0.6504 | 0.7077 |
| 0.4587 | 3.0 | 4266 | 0.4792 | 0.7694 | 0.6422 | 0.7058 |
| 0.4369 | 4.0 | 5688 | 0.4851 | 0.7745 | 0.6716 | 0.7230 |
| 0.4155 | 5.0 | 7110 | 0.4705 | 0.7791 | 0.6970 | 0.7380 |
| 0.3961 | 6.0 | 8532 | 0.4633 | 0.7858 | 0.7093 | 0.7476 |
| 0.3772 | 7.0 | 9954 | 0.4572 | 0.7908 | 0.7176 | 0.7542 |
| 0.3593 | 8.0 | 11376 | 0.4568 | 0.7910 | 0.7234 | 0.7572 |
| 0.3422 | 9.0 | 12798 | 0.4661 | 0.7927 | 0.7227 | 0.7577 |
| 0.3265 | 10.0 | 14220 | 0.4596 | 0.7983 | 0.7290 | 0.7636 |
| 0.3119 | 11.0 | 15642 | 0.4635 | 0.7977 | 0.7255 | 0.7616 |
| 0.2961 | 12.0 | 17064 | 0.4857 | 0.8008 | 0.7309 | 0.7659 |
| 0.2831 | 13.0 | 18486 | 0.4987 | 0.8037 | 0.7314 | 0.7676 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Dawn576/Dawn | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
tags:
- FrozenLake-v1-4x4-no_slippery
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-FrozenLake-v1-4x4-noSlippery
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: FrozenLake-v1-4x4-no_slippery
type: FrozenLake-v1-4x4-no_slippery
metrics:
- type: mean_reward
value: 1.00 +/- 0.00
name: mean_reward
verified: false
---
# **Q-Learning** Agent playing **FrozenLake-v1**
This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.
## Usage
```python
import gym

# load_from_hub here is the helper defined in the Hugging Face Deep RL course notebooks.
model = load_from_hub(repo_id="Bhaskarbha/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")
# Check whether the environment needs additional attributes (e.g. is_slippery=False).
env = gym.make(model["env_id"])
```
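Once loaded, the agent simply acts greedily with respect to its Q-table. Continuing from the snippet above, a rollout sketch (this assumes the pickled dict stores the table under the `"qtable"` key, as the Deep RL course notebooks do, and a gym>=0.26 step API):
```python
import numpy as np

state, info = env.reset()
done = False
while not done:
    action = int(np.argmax(model["qtable"][state]))  # greedy action for this state
    state, reward, terminated, truncated, info = env.step(action)
    done = terminated or truncated
```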
|
Daymarebait/Discord_BOT_RICK | [
"conversational"
]
| conversational | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 3 | null | ---
tags:
- Taxi-v3
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-Taxi-v3
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: Taxi-v3
type: Taxi-v3
metrics:
- type: mean_reward
value: 7.52 +/- 2.72
name: mean_reward
verified: false
---
# **Q-Learning** Agent playing **Taxi-v3**
This is a trained model of a **Q-Learning** agent playing **Taxi-v3**.
## Usage
```python
import gym

# load_from_hub here is the helper defined in the Hugging Face Deep RL course notebooks.
model = load_from_hub(repo_id="Bhaskarbha/q-Taxi-v3", filename="q-learning.pkl")
# Check whether the environment needs additional attributes (e.g. is_slippery=False).
env = gym.make(model["env_id"])
```
|
Dayout/test | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
library_name: diffusers
pipeline_tag: text-to-image
datasets:
- hearmeneigh/e621-rising-v1-curated
tags:
- not-for-all-audiences
---
**Warning: THIS model is NOT suitable for use by minors. The model can/will generate X-rated/NSFW content.**
> This model has been deprecated in favor of [version 2](https://huggingface.co/hearmeneigh/sd21-e621-rising-v2).
# E621 Rising: A Stable Diffusion 2.1 Model [epoch 19]
* Guaranteed **NSFW** or your money back
* Fine-tuned from [Stable Diffusion v2-1-base](https://huggingface.co/stabilityai/stable-diffusion-2-1-base)
* 19 epochs of 450,000 images each, collected from [E621](https://e621.net/) and curated based on scores, favorite counts, and tag filtering.
* Trained with [5,356 tags](https://huggingface.co/hearmeneigh/sd21-e621-rising-v1/blob/main/meta/tag-counts.json)
* `512x512px`
* Compatible with 🤗 `diffusers`
* Compatible with `stable-diffusion-webui`
* Likely compatible with anything that accepts [`.ckpt` and `.yaml` files](https://huggingface.co/hearmeneigh/sd21-e621-rising-v1/tree/main)
## Getting Started
* [Stable Diffusion WebUI How-To](https://huggingface.co/hearmeneigh/sd21-e621-rising-v1/blob/main/guides/WEBUI.md)
* [Python How-To](https://huggingface.co/hearmeneigh/sd21-e621-rising-v1/blob/main/guides/PYTHON.md)
## Examples
<img src="https://huggingface.co/hearmeneigh/sd21-e621-rising-v1/resolve/main/guides/example-1.jpg" width="512" height="512">
<img src="https://huggingface.co/hearmeneigh/sd21-e621-rising-v1/resolve/main/guides/example-2.jpg" width="512" height="512">
## Example Prompt
```
anthro solo female standing rating:questionable
species:equine biped
two_tone_fur grey_body grey_fur white_fur white_snout white_markings gloves_marking white_tail
blue_eyes facial_markings white_hair white_mane evil_grin
athletic_female
meta:shaded
meta:digital_media_artwork
meta:detailed
meta:digital_painting_artwork
seductive looking_at_viewer
tomboy
tomb raider outfit
```
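Since the model is compatible with 🤗 `diffusers`, a prompt like the one above can be run with a few lines of Python. The snippet below is a minimal sketch (dtype, device, and step count are assumptions, not card-specified values; see the Python how-to linked above for the full walkthrough):
```python
import torch
from diffusers import StableDiffusionPipeline

# Minimal sketch; fp16/CUDA and the step count are assumptions, not card-specified.
pipe = StableDiffusionPipeline.from_pretrained(
    "hearmeneigh/sd21-e621-rising-v1", torch_dtype=torch.float16
).to("cuda")
image = pipe(
    "anthro solo female standing rating:questionable species:equine biped",
    num_inference_steps=30,
).images[0]
image.save("example.png")
```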
## Changes From E621
See a [complete list of tags here](https://huggingface.co/hearmeneigh/sd21-e621-rising-v1/blob/main/meta/tag-counts.json).
* Symbols have been prefixed with `symbol:`, e.g. `symbol:<3`
* All categories except `general` have been prefixed with the category name, e.g. `copyright:somename`. The categories are:
* `artist`
* `copyright`
* `character`
* `species`
* `invalid`
* `meta`
* `lore`
* Tag names are all lowercase and contain only the characters `a-z`, `0-9`, `/`, and `_`
* `:` is used to separate the category name from the tag
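The renaming rules above can be summarized in a small helper. The function below is a hypothetical illustration (it is not part of the model repo, and it omits the special `symbol:` handling):
```python
import re

def normalize_tag(category: str, tag: str) -> str:
    # Hypothetical illustration of the naming rules above; omits `symbol:` handling.
    tag = re.sub(r"[^a-z0-9/_]", "_", tag.lower())
    return tag if category == "general" else f"{category}:{tag}"

print(normalize_tag("copyright", "Some Name"))  # -> copyright:some_name
```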
### Additional Tags
* Image rating
* `rating:explicit`
* `rating:questionable`
* `rating:safe`
### Omissions
Images with any of the following tags were omitted from training. No value judgment here; we just needed to cull the E621 image library down to a cost-efficient size.
The complete list of _included_ tags is [available here](https://huggingface.co/hearmeneigh/sd21-e621-rising-v1/blob/main/meta/tag-counts.json).
* `2_penises`
* `4_balls`
* `4_breasts`
* `6_arms`
* `6_breasts`
* `amputee`
* `baby`
* `character:fenneko`
* `character:fifi_la_fume`
* `character:frisk_undertale`
* `character:rouge_the_bat`
* `character:toriel`
* `child`
* `chubby_female`
* `chubby_gynomorph`
* `copyright:101_dalmatians`
* `copyright:adventure_time`
* `copyright:alien_franchise`
* `copyright:animal_crossing`
* `copyright:chikn_nuggit`
* `copyright:chip_n_dale_rescue_rangers`
* `copyright:conkers_bad_fur_day`
* `copyright:crash_team_racing_nitrofueled`
* `copyright:crash_team_racing_series`
* `copyright:cuphead_game`
* `copyright:digimon`
* `copyright:disgaea`
* `copyright:donkey_kong_series`
* `copyright:dragon_ball_z`
* `copyright:ducktales`
* `copyright:ducktales_2017`
* `copyright:family_guy`
* `copyright:five_nights_at_freddys`
* `copyright:friendship_is_magic`
* `copyright:how_to_train_your_dragon`
* `copyright:jurassic_park`
* `copyright:kelloggs`
* `copyright:lady_and_the_tramp`
* `copyright:lego`
* `copyright:looney_tunes`
* `copyright:magic_the_gathering`
* `copyright:mario_bros`
* `copyright:masters_of_the_universe`
* `copyright:minecraft`
* `copyright:mlp_g5`
* `copyright:ms_paint_adventures`
* `copyright:my_little_pony`
* `copyright:ocarina_of_time`
* `copyright:ori_and_the_blind_forest`
* `copyright:ori_series`
* `copyright:parappa_the_rapper`
* `copyright:pokemon`
* `copyright:regular_show`
* `copyright:rick_and_morty`
* `copyright:sam_and_max`
* `copyright:scoobydoo_series`
* `copyright:scottgames`
* `copyright:shirt_cut_meme`
* `copyright:sonic_the_hedgehog_series`
* `copyright:spongebob_squarepants`
* `copyright:star_trek`
* `copyright:star_wars`
* `copyright:starbound`
* `copyright:super_planet_dolan`
* `copyright:super_smash_bros`
* `copyright:swat_kats`
* `copyright:talespin`
* `copyright:team_cherry`
* `copyright:teen_titans`
* `copyright:teenage_mutant_ninja_turtles`
* `copyright:teenage_mutant_ninja_turtles_2022`
* `copyright:the_amazing_world_of_gumball`
* `copyright:the_legend_of_zelda`
* `copyright:tiny_toon_adventures`
* `copyright:tom_and_jerry`
* `copyright:twilight_princess`
* `copyright:um_jammer_lammy`
* `copyright:wayforward`
* `copyright:we_bare_bears`
* `copyright:winnie_the_pooh_franchise`
* `copyright:xcom`
* `copyright:yugioh`
* `cub`
* `death`
* `diaper`
* `expansion`
* `expression_sheet`
* `favorites:below_50`
* `feces`
* `feral`
* `feral_on_feral`
* `filth`
* `foot_fetish`
* `foot_focus`
* `gore`
* `huge_areola`
* `huge_butt`
* `hyper`
* `hyper_anus`
* `hyper_balls`
* `hyper_belly`
* `hyper_breasts`
* `hyper_butt`
* `hyper_feet`
* `hyper_genitalia`
* `hyper_hips`
* `hyper_lips`
* `hyper_muscles`
* `hyper_nipples`
* `hyper_penis`
* `hyper_pregnancy`
* `hyper_pussy`
* `hyper_sheath`
* `hyper_thighs`
* `hyper_tongue`
* `imminent_death`
* `imminent_vore`
* `inflation`
* `loli`
* `meta:3d_artwork`
* `meta:comic`
* `meta:compression_artifacts`
* `meta:distracting_watermark`
* `meta:line_art`
* `meta:marker_artwork`
* `meta:model_sheet`
* `meta:monochrome`
* `meta:pen_artwork`
* `meta:pencil_artwork`
* `meta:sketch`
* `meta:sketch_page`
* `meta:unfinished`
* `micro`
* `moobs`
* `morbidly_obese`
* `nightmare_fuel`
* `obese`
* `overweight`
* `peeing`
* `plushophilia`
* `pooping`
* `pregnant`
* `scat`
* `score:below_25`
* `shota`
* `smelly`
* `snuff`
* `soiling`
* `species:animate_inanimate`
* `species:arachnid`
* `species:arachnid_humanoid`
* `species:avian`
* `species:eldritch_abomination`
* `species:food_creature`
* `species:insect`
* `species:insect_humanoid`
* `species:living_aircraft`
* `species:living_clothing`
* `species:living_fruit`
* `species:living_inflatable`
* `species:living_machine`
* `species:taur`
* `species:wasp`
* `square_crossover`
* `style_parody`
* `teats`
* `tentacles`
* `teratophilia`
* `toddler`
* `toony`
* `transformation`
* `udders`
* `unusual_anatomy`
* `unusual_genitalia`
* `unusual_genitalia_placement`
* `unusual_penis_placement`
* `urethral`
* `urethral_penetration`
* `urine_stream`
* `voluptuous`
* `vore`
* `watersports`
* `young`
## Training Procedure
* 204-272 images per batch (varies by epoch)
* `512x512px` image size
* Adam optimizer
* Beta1 = `0.9`
* Beta2 = `0.999`
* Weight decay = `1e-2`
* Epsilon = `1e-08`
* Constant learning rate `4e-6`
* `bf16` mixed precision
* 8 epochs of samples stretched to `512x512px` (ignore aspect ratio)
* 9 epochs of samples resized to `512xH` or `Wx512px` with center crop (maintain aspect ratio)
* 2 epochs of samples resized to `< 512x512px` (maintain aspect ratio)
* Tags for each sample are shuffled for each epoch, starting from epoch 16
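Expressed as PyTorch optimizer settings, the configuration above amounts roughly to the sketch below; the actual training script is not published here, so treat it as illustrative.
```python
import torch

# Values taken from the bullet list above; `model` is a stand-in for the
# fine-tuned SD 2.1 UNet (the real training script is not published here).
model = torch.nn.Linear(4, 4)
optimizer = torch.optim.Adam(
    model.parameters(),
    lr=4e-6,             # constant learning rate (no scheduler)
    betas=(0.9, 0.999),
    eps=1e-8,
    weight_decay=1e-2,
)
```
|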
Dazai/Ko | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | 2023-01-25T05:05:07Z | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
- f1
model-index:
- name: distilbert_sa_GLUE_Experiment_qqp_384
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE QQP
type: glue
config: qqp
split: validation
args: qqp
metrics:
- name: Accuracy
type: accuracy
value: 0.8082364580756863
- name: F1
type: f1
value: 0.7405200977275009
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert_sa_GLUE_Experiment_qqp_384
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE QQP dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4322
- Accuracy: 0.8082
- F1: 0.7405
- Combined Score: 0.7744
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 256
- eval_batch_size: 256
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Combined Score |
|:-------------:|:-----:|:-----:|:---------------:|:--------:|:------:|:--------------:|
| 0.5251 | 1.0 | 1422 | 0.5016 | 0.7563 | 0.6686 | 0.7124 |
| 0.466 | 2.0 | 2844 | 0.4668 | 0.7745 | 0.6459 | 0.7102 |
| 0.4292 | 3.0 | 4266 | 0.4609 | 0.7854 | 0.6685 | 0.7270 |
| 0.3971 | 4.0 | 5688 | 0.4463 | 0.7945 | 0.7190 | 0.7568 |
| 0.3677 | 5.0 | 7110 | 0.4326 | 0.8001 | 0.7280 | 0.7641 |
| 0.3398 | 6.0 | 8532 | 0.4511 | 0.8017 | 0.7361 | 0.7689 |
| 0.3141 | 7.0 | 9954 | 0.4322 | 0.8082 | 0.7405 | 0.7744 |
| 0.2891 | 8.0 | 11376 | 0.4373 | 0.8096 | 0.7434 | 0.7765 |
| 0.266 | 9.0 | 12798 | 0.4793 | 0.7966 | 0.7440 | 0.7703 |
| 0.2433 | 10.0 | 14220 | 0.5018 | 0.8143 | 0.7503 | 0.7823 |
| 0.2235 | 11.0 | 15642 | 0.4917 | 0.8144 | 0.7428 | 0.7786 |
| 0.2045 | 12.0 | 17064 | 0.5152 | 0.8166 | 0.7521 | 0.7844 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Dazai/Ok | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
- f1
model-index:
- name: mobilebert_sa_GLUE_Experiment_mrpc_256
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE MRPC
type: glue
config: mrpc
split: validation
args: mrpc
metrics:
- name: Accuracy
type: accuracy
value: 0.6911764705882353
- name: F1
type: f1
value: 0.7947882736156351
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# mobilebert_sa_GLUE_Experiment_mrpc_256
This model is a fine-tuned version of [google/mobilebert-uncased](https://huggingface.co/google/mobilebert-uncased) on the GLUE MRPC dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6111
- Accuracy: 0.6912
- F1: 0.7948
- Combined Score: 0.7430
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 128
- eval_batch_size: 128
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Combined Score |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:--------------:|
| 0.6431 | 1.0 | 29 | 0.6261 | 0.6838 | 0.8122 | 0.7480 |
| 0.6296 | 2.0 | 58 | 0.6235 | 0.6838 | 0.8122 | 0.7480 |
| 0.6306 | 3.0 | 87 | 0.6237 | 0.6838 | 0.8122 | 0.7480 |
| 0.6297 | 4.0 | 116 | 0.6238 | 0.6838 | 0.8122 | 0.7480 |
| 0.6276 | 5.0 | 145 | 0.6207 | 0.6838 | 0.8122 | 0.7480 |
| 0.6197 | 6.0 | 174 | 0.6213 | 0.6838 | 0.8122 | 0.7480 |
| 0.6065 | 7.0 | 203 | 0.6284 | 0.6912 | 0.8043 | 0.7478 |
| 0.5258 | 8.0 | 232 | 0.6111 | 0.6912 | 0.7948 | 0.7430 |
| 0.4596 | 9.0 | 261 | 0.6506 | 0.7034 | 0.8052 | 0.7543 |
| 0.3953 | 10.0 | 290 | 0.7271 | 0.7034 | 0.7932 | 0.7483 |
| 0.3426 | 11.0 | 319 | 0.9509 | 0.6740 | 0.7542 | 0.7141 |
| 0.2821 | 12.0 | 348 | 1.0021 | 0.6863 | 0.7808 | 0.7335 |
| 0.2177 | 13.0 | 377 | 1.0359 | 0.6691 | 0.7676 | 0.7184 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Dbluciferm3737/Idk | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
model-index:
- name: mobilebert_sa_GLUE_Experiment_qnli_256
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE QNLI
type: glue
config: qnli
split: validation
args: qnli
metrics:
- name: Accuracy
type: accuracy
value: 0.6082738422112393
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# mobilebert_sa_GLUE_Experiment_qnli_256
This model is a fine-tuned version of [google/mobilebert-uncased](https://huggingface.co/google/mobilebert-uncased) on the GLUE QNLI dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6510
- Accuracy: 0.6083
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 128
- eval_batch_size: 128
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.6764 | 1.0 | 819 | 0.6516 | 0.6112 |
| 0.6368 | 2.0 | 1638 | 0.6510 | 0.6083 |
| 0.6131 | 3.0 | 2457 | 0.6546 | 0.6158 |
| 0.5957 | 4.0 | 3276 | 0.6592 | 0.6101 |
| 0.5825 | 5.0 | 4095 | 0.6751 | 0.5993 |
| 0.5719 | 6.0 | 4914 | 0.6890 | 0.5993 |
| 0.5618 | 7.0 | 5733 | 0.7025 | 0.5907 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Dbluciferm3737/U | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | 2023-01-25T05:17:26Z | ---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: LunarLander-v2
type: LunarLander-v2
metrics:
- type: mean_reward
value: 252.86 +/- 19.71
name: mean_reward
verified: false
---
# **PPO** Agent playing **LunarLander-v2**
This is a trained model of a **PPO** agent playing **LunarLander-v2**
using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).
## Usage (with Stable-baselines3)
A minimal loading sketch (the repo id and checkpoint filename are placeholders; substitute this repository's values):
```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# Download the checkpoint from the Hub and load it (placeholder repo id / filename)
checkpoint = load_from_hub(repo_id="<user>/<repo>", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)
```
|
Ddarkros/Test | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | Quite literally a dumpster, go look at https://huggingface.co/Aotsuyu/LoRA |
DeadBeast/korscm-mBERT | [
"pytorch",
"bert",
"text-classification",
"korean",
"dataset:Korean-Sarcasm",
"transformers",
"license:apache-2.0"
]
| text-classification | {
"architectures": [
"BertForSequenceClassification"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 43 | null | ---
library_name: stable-baselines3
tags:
- PandaReachDense-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: A2C
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: PandaReachDense-v2
type: PandaReachDense-v2
metrics:
- type: mean_reward
value: -1.70 +/- 0.45
name: mean_reward
verified: false
---
# **A2C** Agent playing **PandaReachDense-v2**
This is a trained model of a **A2C** agent playing **PandaReachDense-v2**
using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).
## Usage (with Stable-baselines3)
A minimal loading sketch (again with placeholder repo id and filename; substitute this repository's values):
```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import A2C

# Download the checkpoint from the Hub and load it (placeholder repo id / filename)
checkpoint = load_from_hub(repo_id="<user>/<repo>", filename="a2c-PandaReachDense-v2.zip")
model = A2C.load(checkpoint)
```
|
DeadBeast/roberta-base-pretrained-mr | [
"jax",
"roberta",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"RobertaForMaskedLM"
],
"model_type": "roberta",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 6 | null | ---
tags:
- generated_from_trainer
model-index:
- name: ko-jeolla-nmt-v2
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# ko-jeolla-nmt-v2
This model is a fine-tuned version of [leadawon/ko-jeolla-nmt-v1](https://huggingface.co/leadawon/ko-jeolla-nmt-v1) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2911
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 96
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 1
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 0.2926 | 0.17 | 500 | 0.3283 |
| 0.3111 | 0.33 | 1000 | 0.3344 |
| 0.3009 | 0.5 | 1500 | 0.3138 |
| 0.2868 | 0.66 | 2000 | 0.3030 |
| 0.2653 | 0.83 | 2500 | 0.2969 |
| 0.2493 | 0.99 | 3000 | 0.2911 |
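A minimal usage sketch (the repo id is an assumption based on the v1 checkpoint's namespace):
```python
from transformers import pipeline

# Assumed repo id, mirroring the leadawon/ko-jeolla-nmt-v1 base model
translator = pipeline("text2text-generation", model="leadawon/ko-jeolla-nmt-v2")
print(translator("안녕하세요, 만나서 반갑습니다.")[0]["generated_text"])
```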
### Framework versions
- Transformers 4.26.0
- Pytorch 1.13.1+cu116
- Tokenizers 0.13.2
|
DecafNosebleed/DialoGPT-small-ScaraBot | [
"pytorch",
"gpt2",
"text-generation",
"transformers",
"conversational"
]
| conversational | {
"architectures": [
"GPT2LMHeadModel"
],
"model_type": "gpt2",
"task_specific_params": {
"conversational": {
"max_length": 1000
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 15 | null | ---
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- kp20k
metrics:
- rouge
model-index:
- name: keyphrase-extractions_bart-large
results:
- task:
name: Sequence-to-sequence Language Modeling
type: text2text-generation
dataset:
name: kp20k
type: kp20k
config: generation
split: train[:15%]
args: generation
metrics:
- name: Rouge1
type: rouge
value: 0.4713
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# keyphrase-extractions_bart-large
This model is a fine-tuned version of [facebook/bart-large](https://huggingface.co/facebook/bart-large) on the kp20k dataset.
It achieves the following results on the evaluation set:
- Loss: 1.7257
- Rouge1: 0.4713
- Rouge2: 0.2385
- Rougel: 0.384
- Rougelsum: 0.3841
- Gen Len: 18.3164
- Phrase match: 0.1917
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | Phrase match |
|:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:---------:|:-------:|:------------:|
| 2.5104 | 1.0 | 730 | 1.8021 | 0.464 | 0.2336 | 0.3765 | 0.3766 | 18.9074 | 0.1784 |
| 1.8436 | 2.0 | 1460 | 1.7473 | 0.4709 | 0.2381 | 0.3834 | 0.3836 | 17.8127 | 0.1891 |
| 1.6864 | 3.0 | 2190 | 1.7257 | 0.4713 | 0.2385 | 0.384 | 0.3841 | 18.3164 | 0.1917 |
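A minimal inference sketch (the repo id is a placeholder; the generation budget roughly matches the Gen Len reported above):
```python
from transformers import pipeline

# Placeholder repo id; the model generates keyphrases from an input abstract
extractor = pipeline("text2text-generation",
                     model="<org>/keyphrase-extractions_bart-large", max_length=32)
abstract = "We study transformer-based sequence-to-sequence models for keyphrase generation on scientific abstracts."
print(extractor(abstract)[0]["generated_text"])
```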
### Framework versions
- Transformers 4.26.0
- Pytorch 1.13.1+cu116
- Datasets 2.8.0
- Tokenizers 0.13.2
|
DecafNosebleed/ScaraBot | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null | ---
license: creativeml-openrail-m
pipeline_tag: text-to-image
---
### Kate from [Aim for the Stars](https://moringmark.tumblr.com/post/188798125438/aim-for-the-stars) on Anything V3.0 via Dreambooth
#### model by no3
This is your Anything V3.0 model fine-tuned on the kate concept, taught to Anything V3.0 with Dreambooth.
It can be used by modifying the `instance_prompt`: **sks_kate**
You can also train your own concepts and upload them to the library by using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb).
And you can run your new concept via `diffusers`: [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb), [Spaces with the Public Concepts loaded](https://huggingface.co/spaces/sd-dreambooth-library/stable-diffusion-dreambooth-concepts).
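For example, a minimal `diffusers` sketch (assuming diffusers-format weights are available in this repository):
```python
from diffusers import StableDiffusionPipeline
import torch

# Assumes this repo hosts diffusers-format weights alongside the .ckpt files below
pipe = StableDiffusionPipeline.from_pretrained("no3/kate-at3-beta1", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
image = pipe("a portrait of sks_kate, highly detailed").images[0]
image.save("kate.png")
```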
### note
If you want to use it in a UI like [AUTOMATIC1111](https://github.com/AUTOMATIC1111/stable-diffusion-webui) or any other UI that uses .ckpt files, just download one or more of the files below for your convenience.
[kateA4-at3-beta1.ckpt](https://huggingface.co/no3/kate-at3-beta1/resolve/main/kateA4-at3-beta1.ckpt) 4.27 GB
[kateA4-at3-beta1-pruned.ckpt](https://huggingface.co/no3/kate-at3-beta1/resolve/main/kateA4-at3-beta1-pruned.ckpt) 2.13 GB, uses less storage space but is not yet tested
If you have issues or questions, feel free to visit the Community Tab and start a discussion.
Here are images used for training this concept:
















 |
Declan/Breitbart_model_v1 | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"BertForMaskedLM"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 9 | null | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
model-index:
- name: distilbert_sa_GLUE_Experiment_rte_96
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE RTE
type: glue
config: rte
split: validation
args: rte
metrics:
- name: Accuracy
type: accuracy
value: 0.5270758122743683
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert_sa_GLUE_Experiment_rte_96
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE RTE dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6925
- Accuracy: 0.5271
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 256
- eval_batch_size: 256
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.6932 | 1.0 | 10 | 0.6928 | 0.5271 |
| 0.6934 | 2.0 | 20 | 0.6927 | 0.5271 |
| 0.6934 | 3.0 | 30 | 0.6932 | 0.4729 |
| 0.6931 | 4.0 | 40 | 0.6930 | 0.5271 |
| 0.6936 | 5.0 | 50 | 0.6932 | 0.4440 |
| 0.6932 | 6.0 | 60 | 0.6927 | 0.5271 |
| 0.6932 | 7.0 | 70 | 0.6926 | 0.5271 |
| 0.6928 | 8.0 | 80 | 0.6932 | 0.4477 |
| 0.6935 | 9.0 | 90 | 0.6932 | 0.4260 |
| 0.6933 | 10.0 | 100 | 0.6925 | 0.5271 |
| 0.6929 | 11.0 | 110 | 0.6932 | 0.4440 |
| 0.693 | 12.0 | 120 | 0.6935 | 0.4729 |
| 0.6926 | 13.0 | 130 | 0.6931 | 0.5307 |
| 0.6916 | 14.0 | 140 | 0.6932 | 0.5199 |
| 0.6903 | 15.0 | 150 | 0.6943 | 0.4440 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Declan/Breitbart_model_v2 | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"BertForMaskedLM"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 7 | null | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
model-index:
- name: distilbert_sa_GLUE_Experiment_sst2_96
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE SST2
type: glue
config: sst2
split: validation
args: sst2
metrics:
- name: Accuracy
type: accuracy
value: 0.7970183486238532
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert_sa_GLUE_Experiment_sst2_96
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE SST2 dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4312
- Accuracy: 0.7970
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 256
- eval_batch_size: 256
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.6044 | 1.0 | 264 | 0.5127 | 0.7569 |
| 0.363 | 2.0 | 528 | 0.4364 | 0.8016 |
| 0.29 | 3.0 | 792 | 0.4312 | 0.7970 |
| 0.2514 | 4.0 | 1056 | 0.4493 | 0.7993 |
| 0.2279 | 5.0 | 1320 | 0.4654 | 0.8050 |
| 0.2123 | 6.0 | 1584 | 0.4701 | 0.7970 |
| 0.201 | 7.0 | 1848 | 0.5154 | 0.7936 |
| 0.1904 | 8.0 | 2112 | 0.4989 | 0.8050 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Declan/Breitbart_model_v5 | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"BertForMaskedLM"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 3 | null | Access to model Sootax/clap is restricted and you are not in the authorized list. Visit https://huggingface.co/Sootax/clap to ask for access. |
Declan/Breitbart_model_v6 | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"BertForMaskedLM"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 3 | 2023-01-25T06:22:59Z | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- spearmanr
model-index:
- name: distilbert_sa_GLUE_Experiment_stsb_96
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE STSB
type: glue
config: stsb
split: validation
args: stsb
metrics:
- name: Spearmanr
type: spearmanr
value: 0.01641373482271163
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert_sa_GLUE_Experiment_stsb_96
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE STSB dataset.
It achieves the following results on the evaluation set:
- Loss: 2.2501
- Pearson: 0.0145
- Spearmanr: 0.0164
- Combined Score: 0.0154
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 256
- eval_batch_size: 256
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Pearson | Spearmanr | Combined Score |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:---------:|:--------------:|
| 8.5764 | 1.0 | 23 | 6.5600 | -0.0093 | -0.0112 | -0.0102 |
| 7.7973 | 2.0 | 46 | 6.1824 | 0.0235 | 0.0229 | 0.0232 |
| 7.3288 | 3.0 | 69 | 5.7819 | -0.0634 | -0.0621 | -0.0628 |
| 6.8588 | 4.0 | 92 | 5.3627 | nan | nan | nan |
| 6.3722 | 5.0 | 115 | 4.9405 | nan | nan | nan |
| 5.8419 | 6.0 | 138 | 4.5257 | 0.0099 | 0.0107 | 0.0103 |
| 5.3405 | 7.0 | 161 | 4.1302 | nan | nan | nan |
| 4.8794 | 8.0 | 184 | 3.7607 | nan | nan | nan |
| 4.4156 | 9.0 | 207 | 3.4218 | -0.0075 | -0.0067 | -0.0071 |
| 3.991 | 10.0 | 230 | 3.1190 | 0.0246 | 0.0246 | 0.0246 |
| 3.6029 | 11.0 | 253 | 2.8558 | -0.0034 | -0.0006 | -0.0020 |
| 3.2636 | 12.0 | 276 | 2.6377 | nan | nan | nan |
| 2.9656 | 13.0 | 299 | 2.4660 | 0.0137 | 0.0129 | 0.0133 |
| 2.7028 | 14.0 | 322 | 2.3432 | nan | nan | nan |
| 2.4851 | 15.0 | 345 | 2.2710 | 0.0132 | 0.0145 | 0.0138 |
| 2.3576 | 16.0 | 368 | 2.2501 | 0.0145 | 0.0164 | 0.0154 |
| 2.2531 | 17.0 | 391 | 2.2773 | nan | nan | nan |
| 2.2045 | 18.0 | 414 | 2.3342 | -0.0082 | -0.0113 | -0.0098 |
| 2.1967 | 19.0 | 437 | 2.3460 | nan | nan | nan |
| 2.2041 | 20.0 | 460 | 2.3556 | -0.0025 | -0.0010 | -0.0017 |
| 2.1816 | 21.0 | 483 | 2.3715 | 0.0142 | 0.0160 | 0.0151 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Declan/Breitbart_model_v7 | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"BertForMaskedLM"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 5 | null | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
model-index:
- name: distilbert_sa_GLUE_Experiment_rte_192
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE RTE
type: glue
config: rte
split: validation
args: rte
metrics:
- name: Accuracy
type: accuracy
value: 0.5270758122743683
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert_sa_GLUE_Experiment_rte_192
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE RTE dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6920
- Accuracy: 0.5271
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 256
- eval_batch_size: 256
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.6951 | 1.0 | 10 | 0.6927 | 0.5271 |
| 0.6935 | 2.0 | 20 | 0.6925 | 0.5271 |
| 0.692 | 3.0 | 30 | 0.6931 | 0.5162 |
| 0.694 | 4.0 | 40 | 0.6932 | 0.5090 |
| 0.6923 | 5.0 | 50 | 0.6950 | 0.4729 |
| 0.6932 | 6.0 | 60 | 0.6921 | 0.5271 |
| 0.6926 | 7.0 | 70 | 0.6928 | 0.5235 |
| 0.6917 | 8.0 | 80 | 0.6929 | 0.5271 |
| 0.6896 | 9.0 | 90 | 0.6920 | 0.5271 |
| 0.6758 | 10.0 | 100 | 0.7009 | 0.4801 |
| 0.6273 | 11.0 | 110 | 0.7272 | 0.4946 |
| 0.5267 | 12.0 | 120 | 0.7684 | 0.5199 |
| 0.4413 | 13.0 | 130 | 0.8273 | 0.4946 |
| 0.3725 | 14.0 | 140 | 0.8790 | 0.4946 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Declan/Breitbart_model_v8 | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"BertForMaskedLM"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 3 | 2023-01-25T06:28:18Z | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
model-index:
- name: distilbert_sa_GLUE_Experiment_wnli_96
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE WNLI
type: glue
config: wnli
split: validation
args: wnli
metrics:
- name: Accuracy
type: accuracy
value: 0.5633802816901409
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert_sa_GLUE_Experiment_wnli_96
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE WNLI dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6894
- Accuracy: 0.5634
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 256
- eval_batch_size: 256
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.6929 | 1.0 | 3 | 0.6908 | 0.5634 |
| 0.6926 | 2.0 | 6 | 0.6914 | 0.5634 |
| 0.6934 | 3.0 | 9 | 0.6912 | 0.5634 |
| 0.6924 | 4.0 | 12 | 0.6900 | 0.5634 |
| 0.6935 | 5.0 | 15 | 0.6894 | 0.5634 |
| 0.6933 | 6.0 | 18 | 0.6895 | 0.5634 |
| 0.6932 | 7.0 | 21 | 0.6900 | 0.5634 |
| 0.6928 | 8.0 | 24 | 0.6908 | 0.5634 |
| 0.6937 | 9.0 | 27 | 0.6909 | 0.5634 |
| 0.6933 | 10.0 | 30 | 0.6912 | 0.5634 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Declan/Breitbart_modelv7 | []
| null | {
"architectures": null,
"model_type": null,
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 0 | null |
---
license: cc-by-4.0
metrics:
- bleu4
- meteor
- rouge-l
- bertscore
- moverscore
language: es
datasets:
- lmqg/qg_esquad
pipeline_tag: text2text-generation
tags:
- question generation
- answer extraction
widget:
- text: "generate question: del <hl> Ministerio de Desarrollo Urbano <hl> , Gobierno de la India."
example_title: "Question Generation Example 1"
- text: "generate question: a <hl> noviembre <hl> , que es también la estación lluviosa."
example_title: "Question Generation Example 2"
- text: "generate question: como <hl> el gobierno de Abbott <hl> que asumió el cargo el 18 de septiembre de 2013."
example_title: "Question Generation Example 3"
- text: "extract answers: <hl> En la diáspora somalí, múltiples eventos islámicos de recaudación de fondos se llevan a cabo cada año en ciudades como Birmingham, Londres, Toronto y Minneapolis, donde los académicos y profesionales somalíes dan conferencias y responden preguntas de la audiencia. <hl> El propósito de estos eventos es recaudar dinero para nuevas escuelas o universidades en Somalia, para ayudar a los somalíes que han sufrido como consecuencia de inundaciones y / o sequías, o para reunir fondos para la creación de nuevas mezquitas como."
example_title: "Answer Extraction Example 1"
- text: "extract answers: <hl> Los estudiosos y los histori a dores están divididos en cuanto a qué evento señala el final de la era helenística. <hl> El período helenístico se puede ver que termina con la conquista final del corazón griego por Roma en 146 a. C. tras la guerra aquea, con la derrota final del reino ptolemaico en la batalla de Actium en 31 a. Helenístico se distingue de helénico en que el primero abarca toda la esfera de influencia griega antigua directa, mientras que el segundo se refiere a la propia Grecia."
example_title: "Answer Extraction Example 2"
model-index:
- name: lmqg/mbart-large-cc25-esquad-qg-ae
results:
- task:
name: Text2text Generation
type: text2text-generation
dataset:
name: lmqg/qg_esquad
type: default
args: default
metrics:
- name: BLEU4 (Question Generation)
type: bleu4_question_generation
value: 7.61
- name: ROUGE-L (Question Generation)
type: rouge_l_question_generation
value: 20.95
- name: METEOR (Question Generation)
type: meteor_question_generation
value: 19.58
- name: BERTScore (Question Generation)
type: bertscore_question_generation
value: 79.36
- name: MoverScore (Question Generation)
type: moverscore_question_generation
value: 56.05
- name: QAAlignedF1Score-BERTScore (Question & Answer Generation (with Gold Answer))
type: qa_aligned_f1_score_bertscore_question_answer_generation_with_gold_answer
value: 81.13
- name: QAAlignedRecall-BERTScore (Question & Answer Generation (with Gold Answer))
type: qa_aligned_recall_bertscore_question_answer_generation_with_gold_answer
value: 84.91
- name: QAAlignedPrecision-BERTScore (Question & Answer Generation (with Gold Answer))
type: qa_aligned_precision_bertscore_question_answer_generation_with_gold_answer
value: 77.75
- name: QAAlignedF1Score-MoverScore (Question & Answer Generation (with Gold Answer))
type: qa_aligned_f1_score_moverscore_question_answer_generation_with_gold_answer
value: 54.86
- name: QAAlignedRecall-MoverScore (Question & Answer Generation (with Gold Answer))
type: qa_aligned_recall_moverscore_question_answer_generation_with_gold_answer
value: 57.16
- name: QAAlignedPrecision-MoverScore (Question & Answer Generation (with Gold Answer))
type: qa_aligned_precision_moverscore_question_answer_generation_with_gold_answer
value: 52.82
- name: BLEU4 (Answer Extraction)
type: bleu4_answer_extraction
value: 21.5
- name: ROUGE-L (Answer Extraction)
type: rouge_l_answer_extraction
value: 46.66
- name: METEOR (Answer Extraction)
type: meteor_answer_extraction
value: 40.42
- name: BERTScore (Answer Extraction)
type: bertscore_answer_extraction
value: 86.7
- name: MoverScore (Answer Extraction)
type: moverscore_answer_extraction
value: 77.96
- name: AnswerF1Score (Answer Extraction)
type: answer_f1_score__answer_extraction
value: 70.95
- name: AnswerExactMatch (Answer Extraction)
type: answer_exact_match_answer_extraction
value: 52.81
---
# Model Card of `lmqg/mbart-large-cc25-esquad-qg-ae`
This model is a fine-tuned version of [facebook/mbart-large-cc25](https://huggingface.co/facebook/mbart-large-cc25) for question generation and answer extraction jointly on the [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) (dataset_name: default) via [`lmqg`](https://github.com/asahi417/lm-question-generation).
### Overview
- **Language model:** [facebook/mbart-large-cc25](https://huggingface.co/facebook/mbart-large-cc25)
- **Language:** es
- **Training data:** [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) (default)
- **Online Demo:** [https://autoqg.net/](https://autoqg.net/)
- **Repository:** [https://github.com/asahi417/lm-question-generation](https://github.com/asahi417/lm-question-generation)
- **Paper:** [https://arxiv.org/abs/2210.03992](https://arxiv.org/abs/2210.03992)
### Usage
- With [`lmqg`](https://github.com/asahi417/lm-question-generation#lmqg-language-model-for-question-generation-)
```python
from lmqg import TransformersQG
# initialize model
model = TransformersQG(language="es", model="lmqg/mbart-large-cc25-esquad-qg-ae")
# model prediction
question_answer_pairs = model.generate_qa("a noviembre , que es también la estación lluviosa.")
```
- With `transformers`
```python
from transformers import pipeline
pipe = pipeline("text2text-generation", "lmqg/mbart-large-cc25-esquad-qg-ae")
# question generation (the "generate question:" prefix triggers QG)
question = pipe("generate question: del <hl> Ministerio de Desarrollo Urbano <hl> , Gobierno de la India.")
# answer extraction (the "extract answers:" prefix triggers AE)
answer = pipe("extract answers: <hl> En la diáspora somalí, múltiples eventos islámicos de recaudación de fondos se llevan a cabo cada año en ciudades como Birmingham, Londres, Toronto y Minneapolis, donde los académicos y profesionales somalíes dan conferencias y responden preguntas de la audiencia. <hl> El propósito de estos eventos es recaudar dinero para nuevas escuelas o universidades en Somalia, para ayudar a los somalíes que han sufrido como consecuencia de inundaciones y / o sequías, o para reunir fondos para la creación de nuevas mezquitas como.")
```
## Evaluation
- ***Metric (Question Generation)***: [raw metric file](https://huggingface.co/lmqg/mbart-large-cc25-esquad-qg-ae/raw/main/eval/metric.first.sentence.paragraph_answer.question.lmqg_qg_esquad.default.json)
| | Score | Type | Dataset |
|:-----------|--------:|:--------|:-----------------------------------------------------------------|
| BERTScore | 79.36 | default | [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) |
| Bleu_1 | 22.05 | default | [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) |
| Bleu_2 | 14.55 | default | [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) |
| Bleu_3 | 10.34 | default | [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) |
| Bleu_4 | 7.61 | default | [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) |
| METEOR | 19.58 | default | [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) |
| MoverScore | 56.05 | default | [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) |
| ROUGE_L | 20.95 | default | [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) |
- ***Metric (Question & Answer Generation)***: [raw metric file](https://huggingface.co/lmqg/mbart-large-cc25-esquad-qg-ae/raw/main/eval/metric.first.answer.paragraph.questions_answers.lmqg_qg_esquad.default.json)
| | Score | Type | Dataset |
|:--------------------------------|--------:|:--------|:-----------------------------------------------------------------|
| QAAlignedF1Score (BERTScore) | 81.13 | default | [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) |
| QAAlignedF1Score (MoverScore) | 54.86 | default | [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) |
| QAAlignedPrecision (BERTScore) | 77.75 | default | [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) |
| QAAlignedPrecision (MoverScore) | 52.82 | default | [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) |
| QAAlignedRecall (BERTScore) | 84.91 | default | [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) |
| QAAlignedRecall (MoverScore) | 57.16 | default | [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) |
- ***Metric (Answer Extraction)***: [raw metric file](https://huggingface.co/lmqg/mbart-large-cc25-esquad-qg-ae/raw/main/eval/metric.first.answer.paragraph_sentence.answer.lmqg_qg_esquad.default.json)
| | Score | Type | Dataset |
|:-----------------|--------:|:--------|:-----------------------------------------------------------------|
| AnswerExactMatch | 52.81 | default | [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) |
| AnswerF1Score | 70.95 | default | [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) |
| BERTScore | 86.7 | default | [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) |
| Bleu_1 | 32.77 | default | [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) |
| Bleu_2 | 28.12 | default | [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) |
| Bleu_3 | 24.52 | default | [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) |
| Bleu_4 | 21.5 | default | [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) |
| METEOR | 40.42 | default | [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) |
| MoverScore | 77.96 | default | [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) |
| ROUGE_L | 46.66 | default | [lmqg/qg_esquad](https://huggingface.co/datasets/lmqg/qg_esquad) |
## Training hyperparameters
The following hyperparameters were used during fine-tuning:
- dataset_path: lmqg/qg_esquad
- dataset_name: default
- input_types: ['paragraph_answer', 'paragraph_sentence']
- output_types: ['question', 'answer']
- prefix_types: ['qg', 'ae']
- model: facebook/mbart-large-cc25
- max_length: 512
- max_length_output: 32
- epoch: 5
- batch: 2
- lr: 0.0001
- fp16: False
- random_seed: 1
- gradient_accumulation_steps: 32
- label_smoothing: 0.15
The full configuration can be found at [fine-tuning config file](https://huggingface.co/lmqg/mbart-large-cc25-esquad-qg-ae/raw/main/trainer_config.json).
## Citation
```
@inproceedings{ushio-etal-2022-generative,
title = "{G}enerative {L}anguage {M}odels for {P}aragraph-{L}evel {Q}uestion {G}eneration",
author = "Ushio, Asahi and
Alva-Manchego, Fernando and
Camacho-Collados, Jose",
booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing",
month = dec,
year = "2022",
address = "Abu Dhabi, U.A.E.",
publisher = "Association for Computational Linguistics",
}
```
|
Declan/CNN_model_v1 | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"BertForMaskedLM"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 7 | null | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
model-index:
- name: distilbert_sa_GLUE_Experiment_sst2_192
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE SST2
type: glue
config: sst2
split: validation
args: sst2
metrics:
- name: Accuracy
type: accuracy
value: 0.7981651376146789
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert_sa_GLUE_Experiment_sst2_192
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE SST2 dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4491
- Accuracy: 0.7982
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 256
- eval_batch_size: 256
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.5372 | 1.0 | 264 | 0.4524 | 0.8005 |
| 0.3132 | 2.0 | 528 | 0.4632 | 0.7913 |
| 0.2501 | 3.0 | 792 | 0.4491 | 0.7982 |
| 0.2176 | 4.0 | 1056 | 0.4819 | 0.7924 |
| 0.1963 | 5.0 | 1320 | 0.4784 | 0.7878 |
| 0.1793 | 6.0 | 1584 | 0.5427 | 0.7947 |
| 0.1651 | 7.0 | 1848 | 0.5849 | 0.7867 |
| 0.1515 | 8.0 | 2112 | 0.6103 | 0.7787 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Declan/CNN_model_v2 | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"BertForMaskedLM"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 5 | null | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
model-index:
- name: distilbert_sa_GLUE_Experiment_mnli_96
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE MNLI
type: glue
config: mnli
split: validation_matched
args: mnli
metrics:
- name: Accuracy
type: accuracy
value: 0.5545158665581774
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert_sa_GLUE_Experiment_mnli_96
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE MNLI dataset.
It achieves the following results on the evaluation set:
- Loss: 0.9288
- Accuracy: 0.5545
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 256
- eval_batch_size: 256
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:-----:|:---------------:|:--------:|
| 1.0498 | 1.0 | 1534 | 0.9988 | 0.5084 |
| 0.9757 | 2.0 | 3068 | 0.9532 | 0.5303 |
| 0.9458 | 3.0 | 4602 | 0.9435 | 0.5377 |
| 0.9272 | 4.0 | 6136 | 0.9306 | 0.5456 |
| 0.9122 | 5.0 | 7670 | 0.9305 | 0.5474 |
| 0.8992 | 6.0 | 9204 | 0.9294 | 0.5489 |
| 0.8867 | 7.0 | 10738 | 0.9260 | 0.5522 |
| 0.8752 | 8.0 | 12272 | 0.9319 | 0.5559 |
| 0.8645 | 9.0 | 13806 | 0.9336 | 0.5604 |
| 0.8545 | 10.0 | 15340 | 0.9200 | 0.5629 |
| 0.8443 | 11.0 | 16874 | 0.9200 | 0.5664 |
| 0.8338 | 12.0 | 18408 | 0.9298 | 0.5672 |
| 0.8252 | 13.0 | 19942 | 0.9383 | 0.5647 |
| 0.8168 | 14.0 | 21476 | 0.9428 | 0.5691 |
| 0.8084 | 15.0 | 23010 | 0.9325 | 0.5730 |
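A minimal inference sketch (placeholder repo id; MNLI has three labels: entailment, neutral, contradiction):
```python
from transformers import pipeline

# Placeholder repo id; premise/hypothesis go in as a sentence pair
nli = pipeline("text-classification", model="<org>/distilbert_sa_GLUE_Experiment_mnli_96")
print(nli({"text": "A soccer game with multiple males playing.",
           "text_pair": "Some men are playing a sport."}))
```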
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Declan/CNN_model_v3 | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"BertForMaskedLM"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 3 | null | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
model-index:
- name: distilbert_sa_GLUE_Experiment_rte_256
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE RTE
type: glue
config: rte
split: validation
args: rte
metrics:
- name: Accuracy
type: accuracy
value: 0.5270758122743683
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert_sa_GLUE_Experiment_rte_256
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE RTE dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6919
- Accuracy: 0.5271
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 256
- eval_batch_size: 256
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.6944 | 1.0 | 10 | 0.6962 | 0.4729 |
| 0.6955 | 2.0 | 20 | 0.6919 | 0.5271 |
| 0.6929 | 3.0 | 30 | 0.6946 | 0.4729 |
| 0.6945 | 4.0 | 40 | 0.6922 | 0.5271 |
| 0.6922 | 5.0 | 50 | 0.6959 | 0.4729 |
| 0.6926 | 6.0 | 60 | 0.6922 | 0.5271 |
| 0.6921 | 7.0 | 70 | 0.6930 | 0.5126 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Declan/CNN_model_v4 | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"BertForMaskedLM"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 3 | null | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
model-index:
- name: distilbert_sa_GLUE_Experiment_sst2_256
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE SST2
type: glue
config: sst2
split: validation
args: sst2
metrics:
- name: Accuracy
type: accuracy
value: 0.8004587155963303
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert_sa_GLUE_Experiment_sst2_256
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE SST2 dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4277
- Accuracy: 0.8005
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 256
- eval_batch_size: 256
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.5012 | 1.0 | 264 | 0.4277 | 0.8005 |
| 0.2971 | 2.0 | 528 | 0.5335 | 0.7833 |
| 0.2415 | 3.0 | 792 | 0.4466 | 0.8131 |
| 0.2092 | 4.0 | 1056 | 0.4814 | 0.8050 |
| 0.1881 | 5.0 | 1320 | 0.5214 | 0.8039 |
| 0.1685 | 6.0 | 1584 | 0.5085 | 0.8085 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Declan/CNN_model_v5 | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"BertForMaskedLM"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 3 | null | ---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: wav2vec2-base-timit-small
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# wav2vec2-base-timit-small
This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on an unspecified dataset (presumably TIMIT, per the model name).
It achieves the following results on the evaluation set:
- Loss: 0.5361
- Wer: 0.3380
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 1000
- num_epochs: 30
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:-----:|:---------------:|:------:|
| 3.571 | 1.0 | 500 | 1.9252 | 1.0022 |
| 0.8969 | 2.01 | 1000 | 0.5066 | 0.5292 |
| 0.4326 | 3.01 | 1500 | 0.4523 | 0.4562 |
| 0.2993 | 4.02 | 2000 | 0.4228 | 0.4202 |
| 0.2335 | 5.02 | 2500 | 0.4252 | 0.4178 |
| 0.2009 | 6.02 | 3000 | 0.4136 | 0.3910 |
| 0.1552 | 7.03 | 3500 | 0.4747 | 0.3863 |
| 0.1388 | 8.03 | 4000 | 0.4359 | 0.3859 |
| 0.1226 | 9.04 | 4500 | 0.4367 | 0.3879 |
| 0.1109 | 10.04 | 5000 | 0.4360 | 0.3760 |
| 0.0991 | 11.04 | 5500 | 0.4899 | 0.3672 |
| 0.0882 | 12.05 | 6000 | 0.4608 | 0.3653 |
| 0.0792 | 13.05 | 6500 | 0.4882 | 0.3703 |
| 0.0745 | 14.06 | 7000 | 0.4716 | 0.3625 |
| 0.065 | 15.06 | 7500 | 0.4896 | 0.3651 |
| 0.0596 | 16.06 | 8000 | 0.4831 | 0.3659 |
| 0.0563 | 17.07 | 8500 | 0.5092 | 0.3585 |
| 0.0536 | 18.07 | 9000 | 0.5376 | 0.3675 |
| 0.0465 | 19.08 | 9500 | 0.5019 | 0.3534 |
| 0.049 | 20.08 | 10000 | 0.4869 | 0.3723 |
| 0.0423 | 21.08 | 10500 | 0.4947 | 0.3501 |
| 0.0348 | 22.09 | 11000 | 0.5524 | 0.3453 |
| 0.0315 | 23.09 | 11500 | 0.5369 | 0.3499 |
| 0.0312 | 24.1 | 12000 | 0.5283 | 0.3519 |
| 0.0258 | 25.1 | 12500 | 0.5202 | 0.3461 |
| 0.0249 | 26.1 | 13000 | 0.5270 | 0.3449 |
| 0.0236 | 27.11 | 13500 | 0.5388 | 0.3408 |
| 0.0206 | 28.11 | 14000 | 0.5361 | 0.3388 |
| 0.0224 | 29.12 | 14500 | 0.5361 | 0.3380 |
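A minimal transcription sketch (the repo id is a placeholder; the model expects 16 kHz audio such as the TIMIT recordings it was presumably trained on):
```python
from transformers import pipeline

# Placeholder repo id; pass a path to a 16 kHz WAV file
asr = pipeline("automatic-speech-recognition", model="<org>/wav2vec2-base-timit-small")
print(asr("sample.wav")["text"])
```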
### Framework versions
- Transformers 4.17.0
- Pytorch 1.13.1+cu116
- Datasets 1.18.3
- Tokenizers 0.13.2
|
Declan/CNN_model_v6 | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"BertForMaskedLM"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 3 | 2023-01-25T06:38:15Z | ---
license: creativeml-openrail-m
---
https://civitai.com/models/4867/abyssorangerobuttsmix2 |
Declan/FoxNews_model_v1 | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"BertForMaskedLM"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 3 | 2023-01-25T06:47:19Z | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
- f1
model-index:
- name: mobilebert_sa_GLUE_Experiment_qqp_256
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE QQP
type: glue
config: qqp
split: validation
args: qqp
metrics:
- name: Accuracy
type: accuracy
value: 0.7976007914914668
- name: F1
type: f1
value: 0.7297109826589595
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# mobilebert_sa_GLUE_Experiment_qqp_256
This model is a fine-tuned version of [google/mobilebert-uncased](https://huggingface.co/google/mobilebert-uncased) on the GLUE QQP dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4349
- Accuracy: 0.7976
- F1: 0.7297
- Combined Score: 0.7637
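The combined score is presumably the unweighted mean of accuracy and F1; a quick check against the values reported above:

```python
accuracy = 0.7976007914914668  # from the model-index metrics above
f1 = 0.7297109826589595
print((accuracy + f1) / 2)  # ~0.7637, matching the Combined Score
```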
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 128
- eval_batch_size: 128
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
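These settings map onto `TrainingArguments` roughly as sketched below; this is not the original training script, `output_dir` is a placeholder, and it is unclear whether 128 is the per-device or the total batch size across GPUs:

```python
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="mobilebert_sa_GLUE_Experiment_qqp_256",  # placeholder
    learning_rate=5e-05,
    per_device_train_batch_size=128,  # card may report total, not per-device
    per_device_eval_batch_size=128,
    seed=10,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-08,
    lr_scheduler_type="linear",
    num_train_epochs=50,
)
```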
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Combined Score |
|:-------------:|:-----:|:-----:|:---------------:|:--------:|:------:|:--------------:|
| 0.526 | 1.0 | 2843 | 0.5088 | 0.7492 | 0.6674 | 0.7083 |
| 0.4762 | 2.0 | 5686 | 0.4782 | 0.7695 | 0.6583 | 0.7139 |
| 0.4438 | 3.0 | 8529 | 0.4532 | 0.7847 | 0.6829 | 0.7338 |
| 0.4161 | 4.0 | 11372 | 0.4602 | 0.7869 | 0.7135 | 0.7502 |
| 0.3968 | 5.0 | 14215 | 0.4395 | 0.7955 | 0.7212 | 0.7583 |
| 0.3815 | 6.0 | 17058 | 0.4392 | 0.7985 | 0.7190 | 0.7587 |
| 0.3659 | 7.0 | 19901 | 0.4349 | 0.7976 | 0.7297 | 0.7637 |
| 0.352 | 8.0 | 22744 | 0.4419 | 0.8005 | 0.7300 | 0.7652 |
| 0.3399 | 9.0 | 25587 | 0.4454 | 0.7998 | 0.7317 | 0.7658 |
| 0.327 | 10.0 | 28430 | 0.4614 | 0.7995 | 0.7359 | 0.7677 |
| 0.3157 | 11.0 | 31273 | 0.4733 | 0.8000 | 0.7246 | 0.7623 |
| 0.3041 | 12.0 | 34116 | 0.4738 | 0.8041 | 0.7283 | 0.7662 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Declan/FoxNews_model_v3 | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"BertForMaskedLM"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 7 | null | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
model-index:
- name: distilbert_sa_GLUE_Experiment_mnli_192
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE MNLI
type: glue
config: mnli
split: validation_matched
args: mnli
metrics:
- name: Accuracy
type: accuracy
value: 0.5665174938974776
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert_sa_GLUE_Experiment_mnli_192
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE MNLI dataset.
It achieves the following results on the evaluation set:
- Loss: 0.9073
- Accuracy: 0.5665
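Once the checkpoint is hosted on the Hub, it can presumably be queried on premise/hypothesis pairs as below; the repo id is a placeholder, and the labels will be generic `LABEL_0`/`LABEL_1`/`LABEL_2` unless an id-to-label mapping was saved:

```python
from transformers import pipeline

nli = pipeline("text-classification",
               model="your-org/distilbert_sa_GLUE_Experiment_mnli_192")  # placeholder
print(nli({"text": "A man is playing a guitar.",
           "text_pair": "A person is making music."}))
```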
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 256
- eval_batch_size: 256
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:-----:|:---------------:|:--------:|
| 1.0297 | 1.0 | 1534 | 0.9819 | 0.5076 |
| 0.9555 | 2.0 | 3068 | 0.9376 | 0.5479 |
| 0.9202 | 3.0 | 4602 | 0.9282 | 0.5506 |
| 0.8951 | 4.0 | 6136 | 0.9175 | 0.5629 |
| 0.8759 | 5.0 | 7670 | 0.9040 | 0.5750 |
| 0.8587 | 6.0 | 9204 | 0.9110 | 0.5670 |
| 0.8422 | 7.0 | 10738 | 0.9196 | 0.5693 |
| 0.8261 | 8.0 | 12272 | 0.9521 | 0.5577 |
| 0.8114 | 9.0 | 13806 | 0.9293 | 0.5744 |
| 0.7967 | 10.0 | 15340 | 0.9075 | 0.5839 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Declan/FoxNews_model_v4 | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"BertForMaskedLM"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 7 | null | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
model-index:
- name: distilbert_sa_GLUE_Experiment_wnli_256
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE WNLI
type: glue
config: wnli
split: validation
args: wnli
metrics:
- name: Accuracy
type: accuracy
value: 0.5633802816901409
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert_sa_GLUE_Experiment_wnli_256
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE WNLI dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6870
- Accuracy: 0.5634
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 256
- eval_batch_size: 256
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.6982 | 1.0 | 3 | 0.6870 | 0.5634 |
| 0.6948 | 2.0 | 6 | 0.6911 | 0.5634 |
| 0.6924 | 3.0 | 9 | 0.6917 | 0.5634 |
| 0.6954 | 4.0 | 12 | 0.6886 | 0.5634 |
| 0.6941 | 5.0 | 15 | 0.6893 | 0.5634 |
| 0.6917 | 6.0 | 18 | 0.6930 | 0.5634 |
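Note that accuracy never moves off 0.5634, which is exactly 40/71, the majority-class rate on WNLI's 71-example validation split; the model appears to have collapsed to predicting a single label:

```python
# 40 of the 71 WNLI validation examples carry the majority label,
# so a constant prediction reproduces the accuracy above.
print(40 / 71)  # 0.5633802816901409
```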
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Declan/FoxNews_model_v5 | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"BertForMaskedLM"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 7 | null | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
model-index:
- name: distilbert_sa_GLUE_Experiment_mnli_256
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE MNLI
type: glue
config: mnli
split: validation_matched
args: mnli
metrics:
- name: Accuracy
type: accuracy
value: 0.5885882831570383
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert_sa_GLUE_Experiment_mnli_256
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE MNLI dataset.
It achieves the following results on the evaluation set:
- Loss: 0.8831
- Accuracy: 0.5886
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 256
- eval_batch_size: 256
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:-----:|:---------------:|:--------:|
| 1.0194 | 1.0 | 1534 | 0.9641 | 0.5250 |
| 0.9428 | 2.0 | 3068 | 0.9256 | 0.5586 |
| 0.9042 | 3.0 | 4602 | 0.9137 | 0.5684 |
| 0.8725 | 4.0 | 6136 | 0.8912 | 0.5849 |
| 0.8471 | 5.0 | 7670 | 0.8784 | 0.5930 |
| 0.8242 | 6.0 | 9204 | 0.8841 | 0.5932 |
| 0.8037 | 7.0 | 10738 | 0.8826 | 0.6006 |
| 0.784 | 8.0 | 12272 | 0.9013 | 0.5946 |
| 0.7647 | 9.0 | 13806 | 0.8934 | 0.6054 |
| 0.7468 | 10.0 | 15340 | 0.8993 | 0.6042 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Declan/FoxNews_model_v6 | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"BertForMaskedLM"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 3 | null | ---
library_name: stable-baselines3
tags:
- AntBulletEnv-v0
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: A2C
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: AntBulletEnv-v0
type: AntBulletEnv-v0
metrics:
- type: mean_reward
value: 1753.30 +/- 354.24
name: mean_reward
verified: false
---
# **A2C** Agent playing **AntBulletEnv-v0**
This is a trained model of an **A2C** agent playing **AntBulletEnv-v0**
using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).
## Usage (with Stable-baselines3)
A minimal loading sketch; the repo id and filename below are placeholders for wherever this checkpoint is hosted:
```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import A2C

# Placeholder repo id/filename; substitute the actual Hub location.
checkpoint = load_from_hub(repo_id="your-org/a2c-AntBulletEnv-v0",
                           filename="a2c-AntBulletEnv-v0.zip")
model = A2C.load(checkpoint)
```
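Continuing from the snippet above, the reported mean reward can be re-estimated with SB3's evaluation helper (this assumes `pybullet` is installed, which registers the Bullet envs in gym):

```python
import gym
import pybullet_envs  # noqa: F401 -- registers AntBulletEnv-v0
from stable_baselines3.common.evaluation import evaluate_policy

env = gym.make("AntBulletEnv-v0")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```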
|
Declan/HuffPost_model_v1 | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"BertForMaskedLM"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 3 | null | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- spearmanr
model-index:
- name: distilbert_sa_GLUE_Experiment_stsb_384
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE STSB
type: glue
config: stsb
split: validation
args: stsb
metrics:
- name: Spearmanr
type: spearmanr
value: 0.06351501126231118
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert_sa_GLUE_Experiment_stsb_384
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE STSB dataset.
It achieves the following results on the evaluation set:
- Loss: 2.3296
- Pearson: 0.0643
- Spearmanr: 0.0635
- Combined Score: 0.0639
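Pearson and Spearman correlations, and their mean (the combined score), can be reproduced with `scipy`; the arrays below are illustrative, not predictions from this model:

```python
from scipy.stats import pearsonr, spearmanr

preds = [2.5, 0.0, 4.1, 3.3]  # illustrative similarity predictions
gold = [3.0, 0.5, 4.5, 2.8]   # illustrative STSB gold scores
pearson = pearsonr(preds, gold)[0]
spearman = spearmanr(preds, gold)[0]
print(pearson, spearman, (pearson + spearman) / 2)
```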
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 256
- eval_batch_size: 256
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Pearson | Spearmanr | Combined Score |
|:-------------:|:-----:|:----:|:---------------:|:-------:|:---------:|:--------------:|
| 4.1667 | 1.0 | 23 | 2.3937 | 0.0211 | 0.0215 | 0.0213 |
| 2.1645 | 2.0 | 46 | 2.3296 | 0.0643 | 0.0635 | 0.0639 |
| 2.0445 | 3.0 | 69 | 2.5873 | 0.0574 | 0.0760 | 0.0667 |
| 1.9177 | 4.0 | 92 | 2.5104 | 0.1360 | 0.1374 | 0.1367 |
| 1.6933 | 5.0 | 115 | 2.4024 | 0.1910 | 0.2072 | 0.1991 |
| 1.4482 | 6.0 | 138 | 2.5412 | 0.2007 | 0.2127 | 0.2067 |
| 1.2485 | 7.0 | 161 | 2.5616 | 0.1943 | 0.2005 | 0.1974 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Declan/HuffPost_model_v2 | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"BertForMaskedLM"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 3 | 2023-01-25T07:01:19Z | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
model-index:
- name: distilbert_sa_GLUE_Experiment_wnli_384
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE WNLI
type: glue
config: wnli
split: validation
args: wnli
metrics:
- name: Accuracy
type: accuracy
value: 0.5633802816901409
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert_sa_GLUE_Experiment_wnli_384
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE WNLI dataset.
It achieves the following results on the evaluation set:
- Loss: 0.6861
- Accuracy: 0.5634
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 256
- eval_batch_size: 256
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.7012 | 1.0 | 3 | 0.7047 | 0.4366 |
| 0.7043 | 2.0 | 6 | 0.6952 | 0.4366 |
| 0.6928 | 3.0 | 9 | 0.6861 | 0.5634 |
| 0.6998 | 4.0 | 12 | 0.6874 | 0.5634 |
| 0.6927 | 5.0 | 15 | 0.6957 | 0.4366 |
| 0.6952 | 6.0 | 18 | 0.7001 | 0.4366 |
| 0.6966 | 7.0 | 21 | 0.6927 | 0.5634 |
| 0.6917 | 8.0 | 24 | 0.6909 | 0.5634 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Declan/HuffPost_model_v4 | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"BertForMaskedLM"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 3 | 2023-01-25T07:02:46Z | ---
tags:
- Pixelcopter-PLE-v0
- reinforce
- reinforcement-learning
- custom-implementation
- deep-rl-class
model-index:
- name: Reinforce-Pixelcopter-PLE-v0
results:
- task:
type: reinforcement-learning
name: reinforcement-learning
dataset:
name: Pixelcopter-PLE-v0
type: Pixelcopter-PLE-v0
metrics:
- type: mean_reward
value: 29.20 +/- 26.60
name: mean_reward
verified: false
---
# **Reinforce** Agent playing **Pixelcopter-PLE-v0**
This is a trained model of a **Reinforce** agent playing **Pixelcopter-PLE-v0**.
To learn how to use this model and train your own, see Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
|
Declan/HuffPost_model_v5 | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"BertForMaskedLM"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 3 | 2023-01-25T07:05:01Z | ---
language:
- en
license: apache-2.0
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
model-index:
- name: distilbert_sa_GLUE_Experiment_mnli_384
results:
- task:
name: Text Classification
type: text-classification
dataset:
name: GLUE MNLI
type: glue
config: mnli
split: validation_matched
args: mnli
metrics:
- name: Accuracy
type: accuracy
value: 0.6144222945484134
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# distilbert_sa_GLUE_Experiment_mnli_384
This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE MNLI dataset.
It achieves the following results on the evaluation set:
- Loss: 0.8561
- Accuracy: 0.6144
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 256
- eval_batch_size: 256
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50
- mixed_precision_training: Native AMP
### Training results
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:-----:|:---------------:|:--------:|
| 1.0075 | 1.0 | 1534 | 0.9587 | 0.5303 |
| 0.9233 | 2.0 | 3068 | 0.9005 | 0.5729 |
| 0.8749 | 3.0 | 4602 | 0.8834 | 0.5888 |
| 0.8389 | 4.0 | 6136 | 0.8564 | 0.6107 |
| 0.8058 | 5.0 | 7670 | 0.8487 | 0.6142 |
| 0.776 | 6.0 | 9204 | 0.8578 | 0.6220 |
| 0.7467 | 7.0 | 10738 | 0.8618 | 0.6187 |
| 0.7171 | 8.0 | 12272 | 0.8828 | 0.6207 |
| 0.6876 | 9.0 | 13806 | 0.8901 | 0.6292 |
| 0.6589 | 10.0 | 15340 | 0.8953 | 0.6219 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.14.0a0+410ce96
- Datasets 2.8.0
- Tokenizers 0.13.2
|
Declan/HuffPost_model_v6 | [
"pytorch",
"bert",
"fill-mask",
"transformers",
"autotrain_compatible"
]
| fill-mask | {
"architectures": [
"BertForMaskedLM"
],
"model_type": "bert",
"task_specific_params": {
"conversational": {
"max_length": null
},
"summarization": {
"early_stopping": null,
"length_penalty": null,
"max_length": null,
"min_length": null,
"no_repeat_ngram_size": null,
"num_beams": null,
"prefix": null
},
"text-generation": {
"do_sample": null,
"max_length": null
},
"translation_en_to_de": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_fr": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
},
"translation_en_to_ro": {
"early_stopping": null,
"max_length": null,
"num_beams": null,
"prefix": null
}
}
} | 9 | null | ---
tags:
- generated_from_trainer
model-index:
- name: jeolla-ko-nmt-v1
results: []
---
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
# jeolla-ko-nmt-v1
This model is a fine-tuned version of an unspecified base model on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1559
## Model description
More information needed
## Intended uses & limitations
More information needed
## Training and evaluation data
More information needed
## Training procedure
### Training hyperparameters
The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 96
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 5
- mixed_precision_training: Native AMP
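The total train batch size of 96 comes from 24 examples per micro-batch accumulated over 4 steps; a toy PyTorch sketch of the pattern (the model and data are stand-ins):

```python
import torch

model = torch.nn.Linear(8, 1)  # stand-in model
optimizer = torch.optim.Adam(model.parameters(), lr=5e-05)
loader = [(torch.randn(24, 8), torch.randn(24, 1)) for _ in range(8)]

accum_steps = 4  # 24 x 4 = 96, the total_train_batch_size above
optimizer.zero_grad()
for step, (x, y) in enumerate(loader):
    loss = torch.nn.functional.mse_loss(model(x), y) / accum_steps
    loss.backward()  # gradients accumulate across micro-batches
    if (step + 1) % accum_steps == 0:
        optimizer.step()
        optimizer.zero_grad()
```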
### Training results
| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 0.6661 | 1.0 | 3017 | 0.4137 |
| 0.2839 | 2.0 | 6034 | 0.2249 |
| 0.1932 | 3.0 | 9051 | 0.1815 |
| 0.144 | 4.0 | 12068 | 0.1629 |
| 0.1159 | 5.0 | 15085 | 0.1559 |
### Framework versions
- Transformers 4.26.0
- Pytorch 1.13.1+cu116
- Tokenizers 0.13.2
|